diff --git a/go.mod b/go.mod
index b43cdbc1f9d..e9ac7be386d 100644
--- a/go.mod
+++ b/go.mod
@@ -23,9 +23,9 @@ require (
 	github.com/golang/protobuf v1.5.2
 	github.com/golang/snappy v0.0.4
 	github.com/gorilla/mux v1.8.0
-	github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2
+	github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
-	github.com/hashicorp/consul/api v1.14.0
+	github.com/hashicorp/consul/api v1.15.2
 	github.com/hashicorp/go-cleanhttp v0.5.2
 	github.com/hashicorp/go-sockaddr v1.0.2
 	github.com/hashicorp/memberlist v0.3.1
@@ -42,13 +42,13 @@ require (
 	github.com/prometheus/client_golang v1.13.0
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.37.0
-	github.com/prometheus/prometheus v1.8.2-0.20220616150932-0d0e44d7e65f
+	github.com/prometheus/prometheus v0.39.1
 	github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e
 	github.com/sony/gobreaker v0.5.0
 	github.com/spf13/afero v1.6.0
 	github.com/stretchr/testify v1.8.0
-	github.com/thanos-io/objstore v0.0.0-20220923084403-cec51c61948b
-	github.com/thanos-io/thanos v0.27.0-rc.0.0.20220929152722-4d764443cde3
+	github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604
+	github.com/thanos-io/thanos v0.27.0-rc.0.0.20221007170555-a4e334152549
 	github.com/uber/jaeger-client-go v2.30.0+incompatible
 	github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae
 	go.etcd.io/etcd/api/v3 v3.5.4
@@ -57,25 +57,25 @@ require (
 	go.opentelemetry.io/contrib/propagators/aws v1.9.0
 	go.opentelemetry.io/otel v1.10.0
 	go.opentelemetry.io/otel/bridge/opentracing v1.10.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0
-	go.opentelemetry.io/otel/sdk v1.9.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0
+	go.opentelemetry.io/otel/sdk v1.10.0
 	go.opentelemetry.io/otel/trace v1.10.0
-	go.uber.org/atomic v1.9.0
-	golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced
-	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
-	golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9
-	google.golang.org/grpc v1.48.0
+	go.uber.org/atomic v1.10.0
+	golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9
+	golang.org/x/sync v0.0.0-20220907140024-f12130a52804
+	golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45
+	google.golang.org/grpc v1.49.0
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	sigs.k8s.io/yaml v1.3.0
 )
 
 require (
-	cloud.google.com/go v0.102.0 // indirect
+	cloud.google.com/go v0.104.0 // indirect
 	cloud.google.com/go/compute v1.7.0 // indirect
 	cloud.google.com/go/iam v0.3.0 // indirect
-	cloud.google.com/go/storage v1.22.1 // indirect
+	cloud.google.com/go/storage v1.27.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
@@ -85,7 +85,7 @@ require (
 	github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
 	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
 	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
-	github.com/aws/aws-sdk-go v1.44.72 // indirect
+	github.com/aws/aws-sdk-go v1.44.109 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect
@@ -128,15 +128,13 @@ require (
 	github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/google/btree v1.0.1 // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
-	github.com/google/pprof v0.0.0-20220729232143-a41b82acbcb1 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.4.0 // indirect
-	github.com/googleapis/gnostic v0.0.0-00010101000000-000000000000 // indirect
-	github.com/googleapis/go-type-adapters v1.0.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.5.1 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-hclog v0.16.2 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -187,26 +185,29 @@ require (
 	github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a // indirect
 	github.com/weaveworks/promrus v1.2.0 // indirect
 	github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
-	go.mongodb.org/mongo-driver v1.10.0 // indirect
+	go.mongodb.org/mongo-driver v1.10.2 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 // indirect
-	go.opentelemetry.io/contrib/propagators/ot v1.4.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 // indirect
-	go.opentelemetry.io/otel/metric v0.31.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.18.0 // indirect
-	go.uber.org/goleak v1.1.12 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 // indirect
+	go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 // indirect
+	go.opentelemetry.io/contrib/propagators/b3 v1.9.0 // indirect
+	go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 // indirect
+	go.opentelemetry.io/contrib/propagators/ot v1.9.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
+	go.opentelemetry.io/otel/metric v0.32.0 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+	go.uber.org/goleak v1.2.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
 	golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-	golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 // indirect
-	golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect
+	golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect
+	golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/tools v0.1.12 // indirect
 	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
-	google.golang.org/api v0.91.0 // indirect
+	google.golang.org/api v0.97.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 // indirect
+	google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
 	gopkg.in/ini.v1 v1.66.6 // indirect
@@
-215,10 +216,6 @@ require ( // Override since git.apache.org is down. The docs say to fetch from github. replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 -replace k8s.io/client-go => k8s.io/client-go v0.21.3 - -replace k8s.io/api => k8s.io/api v0.21.3 - // Use fork of gocql that has gokit logs and Prometheus metrics. replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 diff --git a/go.sum b/go.sum index a13868e34b4..52cd53bc38c 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,9 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0 h1:DAq3r8y4mDgyB/ZPJ9v/5VJNqjgJAxTn6ZYLlUywOu8= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -55,8 +56,9 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1 h1:F6IlQJZrZM++apn9V5/VfS3gbTUYg98PS3EMQAzqtfg= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw= @@ -70,8 +72,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXi github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= -github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 
h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= @@ -132,8 +134,8 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.72 h1:i7J5XT7pjBjtl1OrdIhiQHzsG89wkZCcM1HhyK++3DI= -github.com/aws/aws-sdk-go v1.44.72/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.109 h1:+Na5JPeS0kiEHoBp5Umcuuf+IDqXqD0lXnM920E31YI= +github.com/aws/aws-sdk-go v1.44.109/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= @@ -169,7 +171,6 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -191,8 +192,8 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -229,19 +230,18 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/digitalocean/godo v1.80.0 h1:ZULJ/fWDM97YtO7Fa+K6hzJLd7+smCu4N+0n+B/xtj4= +github.com/digitalocean/godo v1.84.1 
h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.16+incompatible h1:2Db6ZR/+FUR3hqPMwnogOPHFn405crbpxvWzKovETOQ= +github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -256,10 +256,11 @@ github.com/efficientgo/core v1.0.0-rc.0 h1:jJoA0N+C4/knWYVZ6GrdHOtDyrg8Y/TR4vFpT github.com/efficientgo/e2e v0.13.1-0.20220923082810-8fa9daa8af8a h1:cnJajqeh/HjvJLhI3wPvWG9OQ4gU79+4pELRD5Pkih8= github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd h1:svR6KxSP1xiPw10RN4Pd7g6BAVkEcNN628PAqZH31mM= github.com/efficientgo/tools/core v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8= +github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -272,7 +273,6 @@ github.com/felixge/fgprof v0.9.2/go.mod h1:+VNi+ZXtHIQ6wIw6bUT8nXQRefQflWECoFyRe github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod 
h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= @@ -341,7 +341,7 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= -github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -436,8 +436,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -451,8 +449,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -479,8 +478,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20220729232143-a41b82acbcb1 
h1:8pyqKJvrJqUYaKS851Ule26pwWvey6IDMiczaBLDKLQ= -github.com/google/pprof v0.0.0-20220729232143-a41b82acbcb1/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk= +github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 h1:ykKxL12NZd3JmWZnyqarJGsF73M9Xhtrik/FEtEeFRE= +github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -496,11 +495,11 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= +github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4= +github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -509,11 +508,12 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b h1:UlCBLaqvS4wVYNrMKSfqTBVsed/EOY9dnenhYZMUnrA= github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= -github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= +github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod 
h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -524,16 +524,17 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 h1:ERKrevVTnCw3Wu4I3mtR15QU3gtWy86cBo6De0jEohg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2/go.mod h1:chrfS3YoLAlKTRE5cFWvCbt8uGAjshktT4PveTUpsFQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk= -github.com/hashicorp/consul/api v1.14.0/go.mod h1:bcaw5CSZ7NE9qfOfKCI1xb7ZKjzu/MyvQkCLTfqLqxQ= +github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= +github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.10.0 h1:rGLEh2AWK4K0KCMvqWAz2EYxQqgciIfMagWZ0nVe5MI= -github.com/hashicorp/consul/sdk v0.10.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -578,10 +579,11 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hetznercloud/hcloud-go v1.33.2 h1:ptWKVYLW7YtjXzsqTFKFxwpVo3iM9UMkVPBYQE4teLU= +github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod 
h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -635,7 +637,7 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= +github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -663,7 +665,7 @@ github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.6.0 h1:y3KgXttj0v6V3HyGtsvdkTl0gIzaAAOdrDXCIwGeh2g= +github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -733,6 +735,7 @@ github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -850,8 +853,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v1.8.2-0.20220616150932-0d0e44d7e65f h1:FQYlZYOQo9rCqO+qzyf44b7rF/QVjAW8AfcBdacbRH8= -github.com/prometheus/prometheus v1.8.2-0.20220616150932-0d0e44d7e65f/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= +github.com/prometheus/prometheus v0.39.1 
h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0= +github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -904,7 +907,6 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -915,7 +917,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -924,15 +925,15 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20220923084403-cec51c61948b h1:P+MnJn+NoU6N2v7wLexTVRCu6ZvcOdPU4L8f2dgEfiY= -github.com/thanos-io/objstore v0.0.0-20220923084403-cec51c61948b/go.mod h1:Vx5dZs9ElxEhNLnum/OgB0pNTqNdI2zdXL82BeJr3T4= -github.com/thanos-io/thanos v0.27.0-rc.0.0.20220929152722-4d764443cde3 h1:BZt0CM16y9IwS3JZ6fTeeHkpJe+VAWqi7514CuMzFi8= -github.com/thanos-io/thanos v0.27.0-rc.0.0.20220929152722-4d764443cde3/go.mod h1:Fqowx8w9O6Vp1RJT6icLPcTIWSZiLkrN77P1e8sICns= +github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 h1:9dceDSKKsLWNHjrMpyzK1t7eVcAZv9Dp3FX+uokUS2Y= +github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604/go.mod h1:Vx5dZs9ElxEhNLnum/OgB0pNTqNdI2zdXL82BeJr3T4= +github.com/thanos-io/thanos v0.27.0-rc.0.0.20221007170555-a4e334152549 h1:zsMUSPJvOZ9jADN0rDeOQj1MdaY5Bx3uGzf1Os93L+w= +github.com/thanos-io/thanos v0.27.0-rc.0.0.20221007170555-a4e334152549/go.mod h1:uO/S/3k1sxy96Z5iD+e//NRjK58X070Cj5fP2nFat3s= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= 
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -943,7 +944,7 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vultr/govultr/v2 v2.17.1 h1:UBmotwA0mkGtyJMakUF9jhLH/W3mN5wfGRn543i/BCA= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae h1:Z8YibUpdBEdCq8nwrYXJQ8vYooevbmEBIdFpseXK3/8= github.com/weaveworks/common v0.0.0-20220706100410-67d27ed40fae/go.mod h1:YfOOLoW1Q/jIIu0WLeSwgStmrKjuJEZSKTAUc+0KFvE= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= @@ -956,9 +957,6 @@ github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= @@ -983,8 +981,9 @@ go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= +go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= go.opencensus.io v0.20.1/go.mod 
h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -995,44 +994,49 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 h1:9NkMW03wwEzPtP/KciZ4Ozu/Uz5ZA7kfqXJIObnrjGU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0/go.mod h1:548ZsYzmT4PL4zWKRd8q/N4z0Wxzn/ZxUE+lkEpwWQA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4qPzLBDtQyPk4ydjlg8zvXbNysnFHaVMKJbVo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs= +go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 h1:S1iBWYnf1iqK4O/qnjUhQ2MMNis/h5+LeB/51+uzGHI= +go.opentelemetry.io/contrib/propagators/autoprop v0.34.0/go.mod h1:lJppyBme+d8vGNukA9sHdlKvw/q4i4c9JXx2RTpuHmM= go.opentelemetry.io/contrib/propagators/aws v1.9.0 h1:60BnkzNPdU3WD12oOGQNTjgbyws8iDggIaBWpghvcVo= go.opentelemetry.io/contrib/propagators/aws v1.9.0/go.mod h1:lYGAfTJZU1mo92QxtuiuNJjRyRyEWj1ldO1b0Vpc1I0= -go.opentelemetry.io/contrib/propagators/ot v1.4.0 h1:sHp8P5+xmMORvsgKjIPPX4U97JUgSqY4xPWa6ncF1PA= -go.opentelemetry.io/contrib/propagators/ot v1.4.0/go.mod h1:FivzsGJqC7ND++UUOifWfkiuEOFXtVQ3fh2ZkqIJ9X4= -go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= +go.opentelemetry.io/contrib/propagators/b3 v1.9.0 h1:Lzb9zU98jCE2kyfCjWfSSsiQoGtvBL+COxvUBf7FNhU= +go.opentelemetry.io/contrib/propagators/b3 v1.9.0/go.mod h1:fyx3gFXn+4w5uWTTiqaI8oBNBW/6w9Ow5zxXf7NGixU= +go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 h1:edJTgwezAtLKUINAXfjxllJ1vlsphNPV7RkuKNd/HkQ= +go.opentelemetry.io/contrib/propagators/jaeger v1.9.0/go.mod h1:Q/AXutvrBTfEDSeRLwOmKhyviX5adJvTesg6JFTybYg= +go.opentelemetry.io/contrib/propagators/ot v1.9.0 h1:+pYoqyFoA3H6EZ7Wie2ZQdqS4ZfG42PAGvj3eLUukHE= +go.opentelemetry.io/contrib/propagators/ot v1.9.0/go.mod h1:D2GfaecHHX67fXT93/5iKl2DArjt8+H0XWtFD8b4Z+k= go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= go.opentelemetry.io/otel/bridge/opentracing v1.10.0 h1:WzAVGovpC1s7KD5g4taU6BWYZP3QGSDVTlbRu9fIHw8= go.opentelemetry.io/otel/bridge/opentracing v1.10.0/go.mod h1:J7GLR/uxxqMAzZptsH0pjte3Ep4GacTCrbGBoDuHBqk= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 h1:ggqApEjDKczicksfvZUCxuvoyDmR6Sbm56LwiK8DVR0= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0 h1:NN90Cuna0CnBg8YNu1Q0V35i2E8LDByFOwHRCq/ZP9I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0/go.mod h1:0EsCXjZAiiZGnLdEUXM9YjCKuuLZMYyglh2QDXcYKVA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0 h1:M0/hqGuJBLeIEu20f89H74RGtqV2dn+SFWEz9ATAAwY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0/go.mod h1:K5G92gbtCrYJ0mn6zj9Pst7YFsDFuvSYEhYKRMcufnM= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= 
-go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/sdk v1.9.0 h1:LNXp1vrr83fNXTHgU8eO89mhzxb/bbWAsHG6fNf3qWo= -go.opentelemetry.io/otel/sdk v1.9.0/go.mod h1:AEZc8nt5bd2F7BC24J5R0mrjYnpEgYHyTcM/vrSple4= -go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k= +go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY= +go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= -go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1154,7 +1158,6 @@ 
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1165,8 +1168,9 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced h1:3dYNDff0VT5xj+mbj2XucFst9WKk6PdGOrb9n+SbIvw= -golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 h1:asZqf0wXastQr+DudYagQS8uBO8bHKeYD1vbAvGmFL8= +golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1189,9 +1193,9 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw= -golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1205,8 +1209,8 @@ 
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= +golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1298,11 +1302,11 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= -golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1321,8 +1325,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 h1:yuLAip3bfURHClMG9VBdzPrQvCWjWiWUTBGV+/fCbUs= +golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1447,8 +1451,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.91.0 h1:731+JzuwaJoZXRQGmPoBiV+SrsAfUaIkdMCWTcQNPyA= -google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.97.0 h1:x/vEL1XDF/2V4xzdNgFPaKHluRESo2aTsL7QzHnBtGQ= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1523,7 +1527,6 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= @@ -1542,8 +1545,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 h1:7PEE9xCtufpGJzrqweakEEnTh7YFELmnKm/ee+5jmfQ= -google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -1607,19 +1610,19 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh 
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= -k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= -k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg= +k8s.io/api v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M= +k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI= +k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/klog/v2 v2.80.0 h1:lyJt0TWMPaGoODa8B8bUuxgHS3W/m/bNr2cca3brA/g= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 1cb60b1ed09..3ac53ae77da 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1631,7 +1631,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1657,7 +1657,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1682,7 +1682,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1710,7 +1710,7 @@ func BenchmarkDistributor_Push(b *testing.B) { // Add a label with a very long name. 
lbls.Set(fmt.Sprintf("xxx_%0.2000d", 1), "xxx") - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1738,7 +1738,7 @@ func BenchmarkDistributor_Push(b *testing.B) { // Add a label with a very long value. lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1)) - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1766,7 +1766,7 @@ func BenchmarkDistributor_Push(b *testing.B) { // Add a label with a very long value. lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1)) - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), @@ -1792,7 +1792,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().Add(-2*time.Hour).UnixNano() / int64(time.Millisecond), @@ -1817,7 +1817,7 @@ func BenchmarkDistributor_Push(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond), @@ -2184,7 +2184,7 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } - metrics[i] = lbls.Labels() + metrics[i] = lbls.Labels(nil) samples[i] = cortexpb.Sample{ Value: float64(i), TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go index 4e2d0e6e59d..ba3e2760d37 100644 --- a/pkg/ingester/active_series.go +++ b/pkg/ingester/active_series.go @@ -113,7 +113,7 @@ func (s *activeSeriesStripe) updateSeriesTimestamp(now time.Time, series labels. if !entryTimeSet { if prev := e.Load(); nowNanos > prev { - entryTimeSet = e.CAS(prev, nowNanos) + entryTimeSet = e.CompareAndSwap(prev, nowNanos) } } @@ -121,7 +121,7 @@ func (s *activeSeriesStripe) updateSeriesTimestamp(now time.Time, series labels. for prevOldest := s.oldestEntryTs.Load(); nowNanos < prevOldest; { // If recent purge already removed entries older than "oldest entry timestamp", setting this to 0 will make // sure that next purge doesn't take the shortcut route. 
- if s.oldestEntryTs.CAS(prevOldest, 0) { + if s.oldestEntryTs.CompareAndSwap(prevOldest, 0) { break } } diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index 32010eb898d..ab3c63db15d 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -60,8 +60,8 @@ func TestActiveSeries_Purge(t *testing.T) { func TestActiveSeries_PurgeOpt(t *testing.T) { metric := labels.NewBuilder(labels.FromStrings("__name__", "logs")) - ls1 := metric.Set("_", "ypfajYg2lsv").Labels() - ls2 := metric.Set("_", "KiqbryhzUpn").Labels() + ls1 := metric.Set("_", "ypfajYg2lsv").Labels(nil) + ls2 := metric.Set("_", "KiqbryhzUpn").Labels(nil) c := NewActiveSeries() diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 83ac6d52e81..da75a81bc38 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -336,7 +336,7 @@ func mockTSDB(t *testing.T, labels []labels.Labels, mint model.Time, samples int opts := tsdb.DefaultHeadOptions() opts.ChunkDirRoot = t.TempDir() // We use TSDB head only. By using full TSDB DB, and appending samples to it, closing it would cause unnecessary HEAD compaction, which slows down the test. - head, err := tsdb.NewHead(nil, nil, nil, opts, nil) + head, err := tsdb.NewHead(nil, nil, nil, nil, opts, nil) require.NoError(t, err) t.Cleanup(func() { _ = head.Close() diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index ebade343d84..ee35eb6b1df 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -470,5 +470,5 @@ func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label lb.Set(additionalL.Name, additionalL.Value) } - return lb.Labels() + return lb.Labels(nil) } diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c355f1596af..e4ec75232ef 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" @@ -86,6 +87,10 @@ func (a *PusherAppender) Commit() error { return err } +func (a *PusherAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + return 0, errors.New("update metadata unsupported") +} + func (a *PusherAppender) Rollback() error { a.labels = nil a.samples = nil diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 13f6f7ab4c2..d564a068b09 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -582,7 +582,7 @@ type failFirstGetBucket struct { } func (f *failFirstGetBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - if f.firstGet.CAS(false, true) { + if f.firstGet.CompareAndSwap(false, true) { return nil, errors.New("Get() request mocked error") } diff --git a/pkg/util/concurrency/runner_test.go b/pkg/util/concurrency/runner_test.go index 1dec972c4d6..54b171d5b1a 100644 --- a/pkg/util/concurrency/runner_test.go +++ b/pkg/util/concurrency/runner_test.go @@ -45,7 +45,7 @@ func TestForEachUser_ShouldContinueOnErrorButReturnIt(t *testing.T) { input := []string{"a", "b", "c"} err := ForEachUser(ctx, input, 2, 
func(ctx context.Context, user string) error { - if processed.CAS(0, 1) { + if processed.CompareAndSwap(0, 1) { return errors.New("the first request is failing") } @@ -103,7 +103,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationHandled(t *testing.T ) err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error { - if processed.CAS(0, 1) { + if processed.CompareAndSwap(0, 1) { return errors.New("the first request is failing") } @@ -140,7 +140,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error { wg.Done() - if processed.CAS(0, 1) { + if processed.CompareAndSwap(0, 1) { // wait till two jobs have been started wg.Wait() return errors.New("the first request is failing") diff --git a/pkg/util/metrics_helper.go b/pkg/util/metrics_helper.go index 649200dd534..33b7fae5f1b 100644 --- a/pkg/util/metrics_helper.go +++ b/pkg/util/metrics_helper.go @@ -728,7 +728,7 @@ func FromLabelPairsToLabels(pairs []*dto.LabelPair) labels.Labels { for _, pair := range pairs { builder.Set(pair.GetName(), pair.GetValue()) } - return builder.Labels() + return builder.Labels(nil) } // GetSumOfHistogramSampleCount returns the sum of samples count of histograms matching the provided metric name @@ -794,7 +794,7 @@ nextMetric: lbls.Set(lp.GetName(), lp.GetValue()) } - result = append(result, lbls.Labels()) + result = append(result, lbls.Labels(nil)) } return result, errs.Err() diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index bbfc8390cbe..e33930b3c26 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,105 +1,111 @@ { "accessapproval": "1.3.0", "accesscontextmanager": "1.2.0", - "aiplatform": "1.10.0", - "analytics": "0.6.1", + "aiplatform": "1.17.0", + "analytics": "0.9.0", "apigateway": "1.2.0", "apigeeconnect": "1.2.0", + "apigeeregistry": "0.2.0", + "apikeys": "0.1.0", "appengine": "1.3.0", - "area120": "0.3.0", - "artifactregistry": "1.3.0", - "asset": "1.2.0", - "assuredworkloads": "0.6.0", - "automl": "1.3.0", + "area120": "0.4.0", + "artifactregistry": "1.4.0", + "asset": "1.4.0", + "assuredworkloads": "1.2.0", + "automl": "1.4.0", + "baremetalsolution": "0.2.0", + "batch": "0.1.0", + "beyondcorp": "0.1.0", "billing": "1.2.0", - "binaryauthorization": "0.5.0", - "certificatemanager": "0.2.0", - "channel": "1.6.0", + "binaryauthorization": "1.0.0", + "certificatemanager": "0.2.1", + "channel": "1.7.0", "cloudbuild": "1.2.0", "clouddms": "1.2.0", - "cloudtasks": "1.3.0", - "compute": "1.6.1", - "contactcenterinsights": "1.2.0", - "container": "1.2.0", - "containeranalysis": "0.3.0", - "datacatalog": "1.3.0", - "dataflow": "0.4.0", + "cloudtasks": "1.4.0", + "compute": "1.9.0", + "contactcenterinsights": "1.2.3", + "container": "1.3.1", + "containeranalysis": "0.4.0", + "datacatalog": "1.3.1", + "dataflow": "0.5.1", + "dataform": "0.2.0", "datafusion": "1.3.0", "datalabeling": "0.3.0", - "dataplex": "0.4.0", + "dataplex": "1.1.0", "dataproc": "1.5.0", - "dataqna": "0.3.0", - "datastream": "0.5.0", - "deploy": "1.2.0", - "dialogflow": "1.9.0", + "dataqna": "0.4.0", + "datastream": "1.0.0", + "deploy": "1.2.1", + "dialogflow": "1.12.1", "dlp": "1.4.0", - "documentai": "1.4.0", - "domains": "0.4.0", + "documentai": 
"1.5.0", + "domains": "0.5.0", "essentialcontacts": "1.2.0", "eventarc": "1.6.0", "filestore": "1.2.0", - "functions": "1.3.0", - "gaming": "1.2.0", + "functions": "1.5.0", + "gaming": "1.3.1", "gkebackup": "0.1.0", "gkeconnect": "0.3.0", - "gkehub": "0.5.0", + "gkehub": "0.8.0", "gkemulticloud": "0.2.0", "grafeas": "0.2.0", "gsuiteaddons": "1.2.0", "iam": "0.3.0", "iap": "1.3.0", - "ids": "0.3.0", + "ids": "1.0.0", "iot": "1.2.0", "kms": "1.4.0", - "language": "1.2.0", - "lifesciences": "0.3.0", + "language": "1.3.0", + "lifesciences": "0.4.0", "managedidentities": "1.2.0", "mediatranslation": "0.3.0", - "memcache": "1.2.0", - "metastore": "1.2.0", - "monitoring": "1.5.0", + "memcache": "1.3.0", + "metastore": "1.3.0", + "monitoring": "1.6.0", "networkconnectivity": "1.2.0", - "networkmanagement": "1.2.0", - "networksecurity": "0.3.0", - "notebooks": "0.4.0", - "optimization": "0.1.0", + "networkmanagement": "1.3.0", + "networksecurity": "0.3.1", + "notebooks": "1.0.0", + "optimization": "1.0.0", "orchestration": "1.2.0", "orgpolicy": "1.3.0", - "osconfig": "1.5.0", - "oslogin": "1.2.0", - "phishingprotection": "0.3.0", + "osconfig": "1.6.0", + "oslogin": "1.3.0", + "phishingprotection": "0.4.0", "policytroubleshooter": "1.2.0", - "privatecatalog": "0.3.0", - "recaptchaenterprise/v2": "2.0.0", - "recommendationengine": "0.2.0", - "recommender": "1.3.0", - "redis": "1.5.0", + "privatecatalog": "0.4.0", + "recaptchaenterprise/v2": "2.0.1", + "recommendationengine": "0.3.0", + "recommender": "1.4.0", + "redis": "1.6.0", "resourcemanager": "1.2.0", "resourcesettings": "1.2.0", - "retail": "1.3.0", + "retail": "1.5.0", "run": "0.1.1", - "scheduler": "1.2.0", - "secretmanager": "1.4.0", - "security": "1.4.0", - "securitycenter": "1.7.0", + "scheduler": "1.3.0", + "secretmanager": "1.5.0", + "security": "1.4.1", + "securitycenter": "1.10.0", "servicecontrol": "1.3.0", - "servicedirectory": "1.2.0", - "servicemanagement": "1.3.0", + "servicedirectory": "1.3.0", + "servicemanagement": "1.3.1", "serviceusage": "1.2.0", "shell": "1.2.0", - "speech": "1.4.0", + "speech": "1.5.0", "storagetransfer": "1.3.0", - "talent": "0.5.0", + "talent": "1.0.0", "texttospeech": "1.3.0", "tpu": "1.2.0", "trace": "1.2.0", "translate": "1.2.0", - "video": "1.4.0", - "videointelligence": "1.2.0", - "vision/v2": "2.0.0", - "vmmigration": "0.3.0", + "video": "1.7.0", + "videointelligence": "1.4.0", + "vision/v2": "2.1.0", + "vmmigration": "1.1.0", "vpcaccess": "1.2.0", - "webrisk": "1.2.0", + "webrisk": "1.3.0", "websecurityscanner": "1.2.0", - "workflows": "1.4.0" + "workflows": "1.5.0" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 9cd82e370f3..d9c9389061b 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.102.0" + ".": "0.104.0" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 06dec80a576..8d00d2e952d 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,26 @@ # Changes +## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) + + +### Features + +* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) + +## 
[0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) + + +### Features + +* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) + +## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) + + +### Bug Fixes + +* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35)) + ## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 669cc753270..01453cc692a 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -35,6 +35,7 @@ For an updated list of all of our released APIs please see our Our libraries are compatible with at least the three most recent, major Go releases. They are currently compatible with: +- Go 1.19 - Go 1.18 - Go 1.17 - Go 1.16 diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index 06463833e3e..871645ef111 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -17,14 +17,12 @@ Package cloud is the root of the packages used to access Google Cloud Services. See https://godoc.org/cloud.google.com/go for a full list of sub-packages. - -Client Options +# Client Options All clients in sub-packages are configurable via client options. These options are described here: https://godoc.org/google.golang.org/api/option. - -Authentication and Authorization +# Authentication and Authorization All the clients in sub-packages support authentication via Google Application Default Credentials (see https://cloud.google.com/docs/authentication/production), or @@ -35,11 +33,12 @@ and authenticate clients. For information on how to create and obtain Application Default Credentials, see https://cloud.google.com/docs/authentication/production. Here is an example of a client using ADC to authenticate: - client, err := secretmanager.NewClient(context.Background()) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background()) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. You can use a file with credentials to authenticate and authorize, such as a JSON key file associated with a Google service account. Service Account keys can be @@ -47,12 +46,13 @@ created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses the Secret Manger client, but the same steps apply to the other client libraries underneath this package. Example: - client, err := secretmanager.NewClient(context.Background(), - option.WithCredentialsFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background(), + option.WithCredentialsFile("/path/to/service-account-key.json")) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. In some cases (for instance, you don't want to store secrets on disk), you can create credentials from in-memory JSON and use the WithCredentials option. 
@@ -62,19 +62,19 @@ the other client libraries underneath this package. Note that scopes can be found at https://developers.google.com/identity/protocols/oauth2/scopes, and are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: - ctx := context.Background() - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) - if err != nil { - // TODO: handle error. - } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + ctx := context.Background() + creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) + if err != nil { + // TODO: handle error. + } + client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. -Timeouts and Cancellation +# Timeouts and Cancellation By default, non-streaming methods, like Create or Get, will have a default deadline applied to the context provided at call time, unless a context deadline is already set. Streaming @@ -83,40 +83,42 @@ arrange for cancellation, use contexts. Transient errors will be retried when correctness allows. Here is an example of how to set a timeout for an RPC, use context.WithTimeout: - ctx := context.Background() - // Do not set a timeout on the context passed to NewClient: dialing happens - // asynchronously, and the context is used to refresh credentials in the - // background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - // Time out if it takes more than 10 seconds to create a dataset. - tctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() // Always call cancel. - - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} - if err := client.DeleteSecret(tctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not set a timeout on the context passed to NewClient: dialing happens + // asynchronously, and the context is used to refresh credentials in the + // background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Time out if it takes more than 10 seconds to create a dataset. + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() // Always call cancel. + + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} + if err := client.DeleteSecret(tctx, req); err != nil { + // TODO: handle error. + } Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel: - ctx := context.Background() - // Do not cancel the context passed to NewClient: dialing happens asynchronously, - // and the context is used to refresh credentials in the background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - cctx, cancel := context.WithCancel(ctx) - defer cancel() // Always call cancel. - - // TODO: Make the cancel function available to whatever might want to cancel the - // call--perhaps a GUI button. - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} - if err := client.DeleteSecret(cctx, req); err != nil { - // TODO: handle error. 
- } + + ctx := context.Background() + // Do not cancel the context passed to NewClient: dialing happens asynchronously, + // and the context is used to refresh credentials in the background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + cctx, cancel := context.WithCancel(ctx) + defer cancel() // Always call cancel. + + // TODO: Make the cancel function available to whatever might want to cancel the + // call--perhaps a GUI button. + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} + if err := client.DeleteSecret(cctx, req); err != nil { + // TODO: handle error. + } To opt out of default deadlines, set the temporary environment variable GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client @@ -130,8 +132,7 @@ timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts would be ineffective and would only interfere with credential refreshing, which uses the same context. - -Connection Pooling +# Connection Pooling Connection pooling differs in clients based on their transport. Cloud clients either rely on HTTP or gRPC transports to communicate @@ -147,23 +148,20 @@ of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a clie option to NewClient calls. This configures the underlying gRPC connections to be pooled and addressed in a round robin fashion. - -Using the Libraries with Docker +# Using the Libraries with Docker Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 for more information. - -Debugging +# Debugging To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See https://godoc.org/google.golang.org/grpc/grpclog for more information. For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". - -Inspecting errors +# Inspecting errors Most of the errors returned by the generated clients are wrapped in an `apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror) @@ -175,35 +173,38 @@ while debugging. `apierror.APIError` gives access to specific details in the error. The transport-specific errors can still be unwrapped using the `apierror.APIError`. - if err != nil { - var ae *apierror.APIError - if errors.As(err, &ae) { - log.Println(ae.Reason()) - log.Println(ae.Details().Help.GetLinks()) - } - } + + if err != nil { + var ae *apierror.APIError + if errors.As(err, &ae) { + log.Println(ae.Reason()) + log.Println(ae.Details().Help.GetLinks()) + } + } If the gRPC transport was used, the `grpc.Status` can still be parsed using the `status.FromError` function. - if err != nil { - if s, ok := status.FromError(err); ok { - log.Println(s.Message()) - for _, d := range s.Proto().Details { - log.Println(d) - } - } - } + + if err != nil { + if s, ok := status.FromError(err); ok { + log.Println(s.Message()) + for _, d := range s.Proto().Details { + log.Println(d) + } + } + } If the REST transport was used, the `googleapi.Error` can be parsed in a similar way. - if err != nil { - var gerr *googleapi.Error - if errors.As(err, &gerr) { - log.Println(gerr.Message) - } - } - -Client Stability + + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Println(gerr.Message) + } + } + +# Client Stability Clients in this repository are considered alpha or beta unless otherwise marked as stable in the README.md. 
Semver is not used to communicate stability diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index a30a3c102a3..be8372087f7 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -62,6 +62,24 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/apigeeregistry/apiv1": { + "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", + "description": "Apigee Registry API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apikeys/apiv2": { + "distribution_name": "cloud.google.com/go/apikeys/apiv2", + "description": "API Keys API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/appengine/apiv1": { "distribution_name": "cloud.google.com/go/appengine/apiv1", "description": "App Engine Admin API", @@ -80,6 +98,15 @@ "release_level": "alpha", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/artifactregistry/apiv1": { + "distribution_name": "cloud.google.com/go/artifactregistry/apiv1", + "description": "Artifact Registry API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/artifactregistry/apiv1beta2": { "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", "description": "Artifact Registry API", @@ -122,7 +149,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { @@ -152,6 +179,69 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/baremetalsolution/apiv2": { + "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", + "description": "Bare Metal Solution API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/batch/apiv1": { + "distribution_name": "cloud.google.com/go/batch/apiv1", + "description": "Batch API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnections/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { + 
"distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/appgateways/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { + "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", + "description": "BeyondCorp API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery": { "distribution_name": "cloud.google.com/go/bigquery", "description": "BigQuery", @@ -302,7 +392,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/binaryauthorization/apiv1beta1": { @@ -449,6 +539,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/dataform/apiv1alpha2": { + "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", + "description": "Dataform API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", + "release_level": "alpha", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/datafusion/apiv1": { "distribution_name": "cloud.google.com/go/datafusion/apiv1", "description": "Cloud Data Fusion API", @@ -473,7 +572,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataproc/apiv1": { @@ -518,7 +617,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1alpha1": { @@ -701,6 +800,24 @@ "release_level": "ga", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/functions/apiv2": { + "distribution_name": "cloud.google.com/go/functions/apiv2", + "description": "Cloud Functions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/functions/apiv2beta": { + "distribution_name": "cloud.google.com/go/functions/apiv2beta", + "description": "Cloud Functions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/functions/metadata": { "distribution_name": "cloud.google.com/go/functions/metadata", "description": "Cloud Functions", @@ -806,7 +923,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iot/apiv1": { @@ -1013,7 +1130,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1beta1": { @@ -1031,7 +1148,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orchestration/airflow/service/apiv1": { @@ -1477,7 +1594,7 @@ }, "cloud.google.com/go/spanner/admin/database/apiv1": { "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", - "description": "Cloud Spanner Database Admin API", + "description": "Cloud Spanner API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", @@ -1553,7 +1670,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4beta1": { @@ -1616,7 +1733,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/stitcher/apiv1": { @@ -1625,7 +1742,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { @@ -1655,6 +1772,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", + "description": "Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/vision/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", 
"description": "Cloud Vision API", @@ -1669,7 +1795,7 @@ "description": "Cloud Vision API", "language": "Go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/v2/apiv1", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", "release_level": "ga", "library_type": "GAPIC_AUTO" }, @@ -1679,7 +1805,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { @@ -1724,7 +1850,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1beta": { diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go index 6435695ba34..30d7bcf77ac 100644 --- a/vendor/cloud.google.com/go/internal/annotate.go +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -31,7 +31,8 @@ import ( // - "google.golang.org/api/googleapi".Error // If the error is not one of these types, Annotate behaves // like -// fmt.Errorf("%s: %v", msg, err) +// +// fmt.Errorf("%s: %v", msg, err) func Annotate(err error, msg string) error { if err == nil { panic("Annotate called with nil") diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index 2dd7a1b9d7f..fe028df58f9 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -21,6 +21,12 @@ "apigeeconnect": { "component": "apigeeconnect" }, + "apigeeregistry": { + "component": "apigeeregistry" + }, + "apikeys": { + "component": "apikeys" + }, "appengine": { "component": "appengine" }, @@ -39,6 +45,15 @@ "automl": { "component": "automl" }, + "baremetalsolution": { + "component": "baremetalsolution" + }, + "batch": { + "component": "batch" + }, + "beyondcorp": { + "component": "beyondcorp" + }, "billing": { "component": "billing" }, @@ -78,6 +93,9 @@ "dataflow": { "component": "dataflow" }, + "dataform": { + "component": "dataform" + }, "datafusion": { "component": "datafusion" }, diff --git a/vendor/cloud.google.com/go/storage/.release-please-manifest.json b/vendor/cloud.google.com/go/storage/.release-please-manifest.json index a068903e032..3186e6d255d 100644 --- a/vendor/cloud.google.com/go/storage/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/storage/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "storage": "1.22.1" + "storage": "1.27.0" } \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index d88a1b2c8ea..7dbf460aabf 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,53 @@ # Changes +## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) + + +### Features + +* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) 
([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) + +## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) + + +### Features + +* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) + + +### Bug Fixes + +* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) + +## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) + + +### Features + +* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) +* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) + +## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) + + +### Features + +* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) + +## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) + + +### Features + +* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) +* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) + + +### Bug Fixes + +* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) + ### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md index 8536d9cab98..f18c37309a5 100644 --- a/vendor/cloud.google.com/go/storage/README.md +++ b/vendor/cloud.google.com/go/storage/README.md @@ -2,7 +2,7 @@ - [About Cloud Storage](https://cloud.google.com/storage/) - [API documentation](https://cloud.google.com/storage/docs) -- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage) +- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) ### Example Usage diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 0c2374008bd..e0ab60073c2 
100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -20,9 +20,8 @@ import ( "reflect" "cloud.google.com/go/internal/trace" - "google.golang.org/api/googleapi" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" raw "google.golang.org/api/storage/v1" - storagepb "google.golang.org/genproto/googleapis/storage/v2" ) // ACLRole is the level of access to grant. @@ -67,6 +66,8 @@ type ProjectTeam struct { } // ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +// ACLHandle on an object operates on the latest generation of that object by default. +// Selecting a specific generation of an object is not currently supported by the client. type ACLHandle struct { c *Client bucket string @@ -119,111 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { } func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) - a.configureCall(ctx, req) - err = run(ctx, func() error { - acls, err = req.Do() - return err - }, a.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...) } func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(ctx, req) - - return run(ctx, func() error { - return req.Do() - }, a.retry, false, setRetryHeaderHTTP(req)) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...) } func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.BucketAccessControls - var err error - req := a.c.raw.BucketAccessControls.List(a.bucket) - a.configureCall(ctx, req) - err = run(ctx, func() error { - acls, err = req.Do() - return err - }, a.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return toBucketACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...) } func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - acl := &raw.BucketAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) - a.configureCall(ctx, req) - return run(ctx, func() error { - _, err := req.Do() - return err - }, a.retry, false, setRetryHeaderHTTP(req)) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...) } func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { - req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) - a.configureCall(ctx, req) - return run(ctx, func() error { - return req.Do() - }, a.retry, false, setRetryHeaderHTTP(req)) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...) 
} func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { - var acls *raw.ObjectAccessControls - var err error - req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) - a.configureCall(ctx, req) - err = run(ctx, func() error { - acls, err = req.Do() - return err - }, a.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil + opts := makeStorageOpts(true, a.retry, a.userProject) + return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...) } func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - - acl := &raw.ObjectAccessControl{ - Bucket: a.bucket, - Entity: string(entity), - Role: string(role), - } - var req setRequest + opts := makeStorageOpts(false, a.retry, a.userProject) if isBucketDefault { - req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) - } else { - req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...) } - a.configureCall(ctx, req) - return run(ctx, func() error { - _, err := req.Do() - return err - }, a.retry, false, setRetryHeaderHTTP(req)) + return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...) } func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) - a.configureCall(ctx, req) - return run(ctx, func() error { - return req.Do() - }, a.retry, false, setRetryHeaderHTTP(req)) + opts := makeStorageOpts(false, a.retry, a.userProject) + return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) } func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 101f695b5a6..ea83e81a1bf 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -20,21 +20,20 @@ import ( "encoding/json" "errors" "fmt" - "net/http" "reflect" + "strings" "time" "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" - "github.com/googleapis/go-type-adapters/adapters" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/googleapi" "google.golang.org/api/iamcredentials/v1" "google.golang.org/api/iterator" "google.golang.org/api/option" raw "google.golang.org/api/storage/v1" - "google.golang.org/genproto/googleapis/storage/v2" - storagepb "google.golang.org/genproto/googleapis/storage/v2" + dpb "google.golang.org/genproto/googleapis/type/date" "google.golang.org/protobuf/proto" ) @@ -56,7 +55,8 @@ type BucketHandle struct { // The supplied name must contain only lowercase letters, numbers, dashes, // underscores, and dots. 
The full specification for valid bucket names can be // found at: -// https://cloud.google.com/storage/docs/bucket-naming +// +// https://cloud.google.com/storage/docs/bucket-naming func (c *Client) Bucket(name string) *BucketHandle { retry := c.retry.clone() return &BucketHandle{ @@ -83,27 +83,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") defer func() { trace.EndSpan(ctx, err) }() - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = b.name - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. - if bkt.Location == "" && bkt.Lifecycle != nil { - bkt.Location = "US" - } - req := b.c.raw.Buckets.Insert(projectID, bkt) - setClientHeader(req.Header()) - if attrs != nil && attrs.PredefinedACL != "" { - req.PredefinedAcl(attrs.PredefinedACL) - } - if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + o := makeStorageOpts(true, b.retry, b.userProject) + if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil { + return err } - return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true, setRetryHeaderHTTP(req)) + return nil } // Delete deletes the Bucket. @@ -111,24 +95,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newDeleteCall() - if err != nil { - return err - } - - return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true, setRetryHeaderHTTP(req)) -} - -func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { - req := b.c.raw.Buckets.Delete(b.name) - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...) } // ACL returns an ACLHandle, which provides access to the bucket's access control list. @@ -151,7 +119,8 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle { // // name must consist entirely of valid UTF-8-encoded runes. 
The full specification // for valid object names can be found at: -// https://cloud.google.com/storage/docs/naming-objects +// +// https://cloud.google.com/storage/docs/naming-objects func (b *BucketHandle) Object(name string) *ObjectHandle { retry := b.retry.clone() return &ObjectHandle{ @@ -176,35 +145,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newGetCall() - if err != nil { - return nil, err - } - var resp *raw.Bucket - err = run(ctx, func() error { - resp, err = req.Context(ctx).Do() - return err - }, b.retry, true, setRetryHeaderHTTP(req)) - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp) -} - -func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { - req := b.c.raw.Buckets.Get(b.name).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.GetBucket(ctx, b.name, b.conds, o...) } // Update updates a bucket's attributes. @@ -212,62 +154,23 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") defer func() { trace.EndSpan(ctx, err) }() - req, err := b.newPatchCall(&uattrs) - if err != nil { - return nil, err - } - if uattrs.PredefinedACL != "" { - req.PredefinedAcl(uattrs.PredefinedACL) - } - if uattrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) - } - isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 - - var rawBucket *raw.Bucket - call := func() error { - rb, err := req.Context(ctx).Do() - rawBucket = rb - return err - } - - if err := run(ctx, call, b.retry, isIdempotent, setRetryHeaderHTTP(req)); err != nil { - return nil, err - } - return newBucket(rawBucket) -} - -func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { - rb := uattrs.toRawBucket() - req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") - setClientHeader(req.Header()) - if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { - return nil, err - } - if b.userProject != "" { - req.UserProject(b.userProject) - } - return req, nil + o := makeStorageOpts(isIdempotent, b.retry, b.userProject) + return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...) } // SignedURL returns a URL for the specified object. Signed URLs allow anyone -// access to a restricted resource for a limited time without needing a -// Google account or signing in. For more information about signed URLs, see -// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// access to a restricted resource for a limited time without needing a Google +// account or signing in. +// For more information about signed URLs, see "[Overview of access control]." // -// This method only requires the Method and Expires fields in the specified -// SignedURLOptions opts to be non-nil. If not provided, it attempts to fill the -// GoogleAccessID and PrivateKey from the GOOGLE_APPLICATION_CREDENTIALS environment variable. 
-// If you are authenticating with a custom HTTP client, Service Account based -// auto-detection will be hindered. +// This method requires the Method and Expires fields in the specified +// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and +// PrivateKey fields in some cases. Read more on the [automatic detection of credentials] +// for this method. // -// If no private key is found, it attempts to use the GoogleAccessID to sign the URL. -// This requires the IAM Service Account Credentials API to be enabled -// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview) -// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account. -// If you do not want these fields set for you, you may pass them in through opts or use -// SignedURL(bucket, name string, opts *SignedURLOptions) instead. +// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication +// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4] func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { return SignedURL(b.name, object, opts) @@ -305,18 +208,11 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, // GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. // The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. // -// This method only requires the Expires field in the specified PostPolicyV4Options -// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey -// from the GOOGLE_APPLICATION_CREDENTIALS environment variable. -// If you are authenticating with a custom HTTP client, Service Account based -// auto-detection will be hindered. +// This method requires the Expires field in the specified PostPolicyV4Options +// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields +// in some cases. Read more on the [automatic detection of credentials] for this method. // -// If no private key is found, it attempts to use the GoogleAccessID to sign the URL. -// This requires the IAM Service Account Credentials API to be enabled -// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview) -// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account. -// If you do not want these fields set for you, you may pass them in through opts or use -// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead. 
+// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4] func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { return GenerateSignedPostPolicyV4(b.name, object, opts) @@ -356,17 +252,27 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { if b.c.creds != nil && len(b.c.creds.JSON) > 0 { var sa struct { - ClientEmail string `json:"client_email"` + ClientEmail string `json:"client_email"` + SAImpersonationURL string `json:"service_account_impersonation_url"` + CredType string `json:"type"` } + err := json.Unmarshal(b.c.creds.JSON, &sa) - if err == nil && sa.ClientEmail != "" { - return sa.ClientEmail, nil - } else if err != nil { + if err != nil { returnErr = err + } else if sa.CredType == "impersonated_service_account" { + start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") + + if end <= start { + returnErr = errors.New("error parsing impersonated service account credentials") + } else { + return sa.SAImpersonationURL[start+1 : end], nil + } + } else if sa.CredType == "service_account" && sa.ClientEmail != "" { + return sa.ClientEmail, nil } else { - returnErr = errors.New("storage: empty client email in credentials") + returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported") } - } // Don't error out if we can't unmarshal, fallback to GCE check. @@ -377,11 +283,11 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { } else if err != nil { returnErr = err } else { - returnErr = errors.New("got empty email from GCE metadata service") + returnErr = errors.New("empty email from GCE metadata service") } } - return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %v", returnErr) + return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr) } func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { @@ -461,8 +367,13 @@ type BucketAttrs struct { PredefinedDefaultObjectACL string // Location is the location of the bucket. It defaults to "US". + // If specifying a dual-region, CustomPlacementConfig should be set in conjunction. Location string + // The bucket's custom placement configuration that holds a list of + // regional locations for custom dual regions. + CustomPlacementConfig *CustomPlacementConfig + // MetaGeneration is the metadata generation of the bucket. // This field is read-only. MetaGeneration int64 @@ -645,6 +556,13 @@ const ( // SetStorageClassAction changes the storage class of live and/or archived // objects. SetStorageClassAction = "SetStorageClass" + + // AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete + // multipart upload when the multipart upload meets the conditions specified + // in the lifecycle rule. The AgeInDays condition is the only allowed + // condition for this action. AgeInDays is measured from the time the + // multipart upload was created. 
+ AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload" ) // LifecycleRule is a lifecycle configuration rule. @@ -665,9 +583,8 @@ type LifecycleRule struct { type LifecycleAction struct { // Type is the type of action to take on matching objects. // - // Acceptable values are "Delete" to delete matching objects and - // "SetStorageClass" to set the storage class defined in StorageClass on - // matching objects. + // Acceptable values are storage.DeleteAction, storage.SetStorageClassAction, + // and storage.AbortIncompleteMPUAction. Type string // StorageClass is the storage class to set on matching objects if the Action @@ -692,7 +609,12 @@ const ( // // All configured conditions must be met for the associated action to be taken. type LifecycleCondition struct { + // AllObjects is used to select all objects in a bucket by + // setting AgeInDays to 0. + AllObjects bool + // AgeInDays is the age of the object in days. + // If you want to set AgeInDays to `0` use AllObjects set to `true`. AgeInDays int64 // CreatedBefore is the time the object was created. @@ -710,21 +632,31 @@ type LifecycleCondition struct { // DaysSinceCustomTime is the days elapsed since the CustomTime date of the // object. This condition can only be satisfied if CustomTime has been set. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. DaysSinceCustomTime int64 // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp // of the object. This condition is relevant only for versioned objects. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. DaysSinceNoncurrentTime int64 // Liveness specifies the object's liveness. Relevant only for versioned objects Liveness Liveness + // MatchesPrefix is the condition matching an object if any of the + // matches_prefix strings are an exact prefix of the object's name. + MatchesPrefix []string + // MatchesStorageClasses is the condition matching the object's storage // class. // // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". MatchesStorageClasses []string + // MatchesSuffix is the condition matching an object if any of the + // matches_suffix strings are an exact suffix of the object's name. + MatchesSuffix []string + // NoncurrentTimeBefore is the noncurrent timestamp of the object. This // condition is satisfied when an object's noncurrent timestamp is before // midnight of the specified date in UTC. @@ -737,6 +669,7 @@ type LifecycleCondition struct { // If the value is N, this condition is satisfied when there are at least N // versions (including the live version) newer than this version of the // object. + // Note: Using `0` as the value will be ignored by the library and not sent to the API. NumNewerVersions int64 } @@ -768,6 +701,15 @@ type BucketWebsite struct { NotFoundPage string } +// CustomPlacementConfig holds the bucket's custom placement +// configuration for Custom Dual Regions. See +// https://cloud.google.com/storage/docs/locations#location-dr for more information. +type CustomPlacementConfig struct { + // The list of regional locations in which data is placed. + // Custom Dual Regions require exactly 2 regional locations. 
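A sketch of creating a dual-region bucket with the new CustomPlacementConfig field, assuming an existing ctx and authenticated *storage.Client named client; the project ID, bucket name, and region pair are illustrative placeholders.

    attrs := &storage.BucketAttrs{
        Location: "US", // dual-region parent location, set together with CustomPlacementConfig
        CustomPlacementConfig: &storage.CustomPlacementConfig{
            DataLocations: []string{"US-EAST1", "US-WEST1"}, // exactly two regional locations
        },
    }
    if err := client.Bucket("my-dual-region-bucket").Create(ctx, "my-project-id", attrs); err != nil {
        // TODO: Handle error.
    }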
+ DataLocations []string +} + func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { return nil, nil @@ -801,6 +743,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { LocationType: b.LocationType, ProjectNumber: b.ProjectNumber, RPO: toRPO(b), + CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), }, nil } @@ -831,6 +774,8 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()), LocationType: b.GetLocationType(), RPO: toRPOFromProto(b), + CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), + ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based } } @@ -868,22 +813,23 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { } } return &raw.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toRawBucketACL(b.ACL), - DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toRawLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), - Cors: toRawCORS(b.CORS), - Encryption: b.Encryption.toRawBucketEncryption(), - Logging: b.Logging.toRawBucketLogging(), - Website: b.Website.toRawBucketWebsite(), - IamConfiguration: bktIAM, - Rpo: b.RPO.String(), + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toRawBucketACL(b.ACL), + DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), + Logging: b.Logging.toRawBucketLogging(), + Website: b.Website.toRawBucketWebsite(), + IamConfiguration: bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), } } @@ -910,7 +856,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { } var bb *storagepb.Bucket_Billing if b.RequesterPays { - bb = &storage.Bucket_Billing{RequesterPays: true} + bb = &storagepb.Bucket_Billing{RequesterPays: true} } var bktIAM *storagepb.Bucket_IamConfig if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { @@ -926,22 +872,23 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { } return &storagepb.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toProtoBucketACL(b.ACL), - DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toProtoLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), - Cors: toProtoCORS(b.CORS), - Encryption: b.Encryption.toProtoBucketEncryption(), - Logging: b.Logging.toProtoBucketLogging(), - Website: b.Website.toProtoBucketWebsite(), - IamConfig: bktIAM, - Rpo: b.RPO.String(), + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toProtoBucketACL(b.ACL), + DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toProtoLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), + Cors: toProtoCORS(b.CORS), + Encryption: b.Encryption.toProtoBucketEncryption(), + Logging: b.Logging.toProtoBucketLogging(), + Website: b.Website.toProtoBucketWebsite(), + IamConfig: 
bktIAM, + Rpo: b.RPO.String(), + CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), } } @@ -958,7 +905,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { } var bb *storagepb.Bucket_Billing if ua.RequesterPays != nil { - bb = &storage.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} + bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} } var bktIAM *storagepb.Bucket_IamConfig var ublaEnabled bool @@ -1333,15 +1280,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle { // most customers. It might be changed in backwards-incompatible ways and is not // subject to any SLA or deprecation policy. func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { - var metageneration int64 - if b.conds != nil { - metageneration = b.conds.MetagenerationMatch - } - req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) - return run(ctx, func() error { - _, err := req.Context(ctx).Do() - return err - }, b.retry, true, setRetryHeaderHTTP(req)) + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...) } // applyBucketConds modifies the provided call using the conditions in conds. @@ -1501,14 +1441,25 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { StorageClass: r.Action.StorageClass, }, Condition: &raw.BucketLifecycleRuleCondition{ - Age: r.Condition.AgeInDays, DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: r.Condition.MatchesPrefix, MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, NumNewerVersions: r.Condition.NumNewerVersions, }, } + // AllObjects takes precedent when both AllObjects and AgeInDays are set + // Rationale: If you've opted into using AllObjects, it makes sense that you + // understand the implications of how this option works with AgeInDays. 
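To illustrate the AllObjects/AgeInDays handling above and the new lifecycle fields, a sketch of a bucket created with such rules; prefixes, day counts, and names are placeholders, and client/ctx are assumed to exist.

    lifecycle := storage.Lifecycle{
        Rules: []storage.LifecycleRule{
            {
                // AllObjects matches objects of any age (Age is sent as 0) and
                // takes precedence over AgeInDays when both are set.
                Action:    storage.LifecycleAction{Type: storage.DeleteAction},
                Condition: storage.LifecycleCondition{AllObjects: true, MatchesPrefix: []string{"tmp/"}},
            },
            {
                // Abort multipart uploads left incomplete for 7 days.
                Action:    storage.LifecycleAction{Type: storage.AbortIncompleteMPUAction},
                Condition: storage.LifecycleCondition{AgeInDays: 7},
            },
        },
    }
    attrs := &storage.BucketAttrs{Lifecycle: lifecycle}
    if err := client.Bucket("my-lifecycle-bucket").Create(ctx, "my-project-id", attrs); err != nil {
        // TODO: Handle error.
    }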
+ if r.Condition.AllObjects { + rr.Condition.Age = googleapi.Int64(0) + rr.Condition.ForceSendFields = []string{"Age"} + } else if r.Condition.AgeInDays > 0 { + rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays) + } + switch r.Condition.Liveness { case LiveAndArchived: rr.Condition.IsLive = nil @@ -1549,11 +1500,18 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { AgeDays: proto.Int32(int32(r.Condition.AgeInDays)), DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), + MatchesPrefix: r.Condition.MatchesPrefix, MatchesStorageClass: r.Condition.MatchesStorageClasses, + MatchesSuffix: r.Condition.MatchesSuffix, NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)), }, } + // TODO(#6205): This may not be needed for gRPC + if r.Condition.AllObjects { + rr.Condition.AgeDays = proto.Int32(0) + } + switch r.Condition.Liveness { case LiveAndArchived: rr.Condition.IsLive = nil @@ -1564,13 +1522,13 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { } if !r.Condition.CreatedBefore.IsZero() { - rr.Condition.CreatedBefore = adapters.TimeToProtoDate(r.Condition.CreatedBefore) + rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore) } if !r.Condition.CustomTimeBefore.IsZero() { - rr.Condition.CustomTimeBefore = adapters.TimeToProtoDate(r.Condition.CustomTimeBefore) + rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore) } if !r.Condition.NoncurrentTimeBefore.IsZero() { - rr.Condition.NoncurrentTimeBefore = adapters.TimeToProtoDate(r.Condition.NoncurrentTimeBefore) + rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore) } rl.Rule = append(rl.Rule, rr) } @@ -1589,13 +1547,20 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { StorageClass: rr.Action.StorageClass, }, Condition: LifecycleCondition{ - AgeInDays: rr.Condition.Age, DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, + MatchesPrefix: rr.Condition.MatchesPrefix, MatchesStorageClasses: rr.Condition.MatchesStorageClass, + MatchesSuffix: rr.Condition.MatchesSuffix, NumNewerVersions: rr.Condition.NumNewerVersions, }, } + if rr.Condition.Age != nil { + r.Condition.AgeInDays = *rr.Condition.Age + if *rr.Condition.Age == 0 { + r.Condition.AllObjects = true + } + } if rr.Condition.IsLive == nil { r.Condition.Liveness = LiveAndArchived @@ -1634,11 +1599,18 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { AgeInDays: int64(rr.GetCondition().GetAgeDays()), DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()), DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()), + MatchesPrefix: rr.GetCondition().GetMatchesPrefix(), MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(), + MatchesSuffix: rr.GetCondition().GetMatchesSuffix(), NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()), }, } + // TODO(#6205): This may not be needed for gRPC + if rr.GetCondition().GetAgeDays() == 0 { + r.Condition.AllObjects = true + } + if rr.GetCondition().IsLive == nil { r.Condition.Liveness = LiveAndArchived } else if rr.GetCondition().GetIsLive() { @@ -1648,13 +1620,13 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { } if rr.GetCondition().GetCreatedBefore() != nil { - r.Condition.CreatedBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) + 
r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) } if rr.GetCondition().GetCustomTimeBefore() != nil { - r.Condition.CustomTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) + r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) } if rr.GetCondition().GetNoncurrentTimeBefore() != nil { - r.Condition.NoncurrentTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) + r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) } l.Rules = append(l.Rules, r) } @@ -1708,7 +1680,7 @@ func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging { return nil } return &storagepb.Bucket_Logging{ - LogBucket: b.LogBucket, + LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket), LogObjectPrefix: b.LogObjectPrefix, } } @@ -1727,8 +1699,9 @@ func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging { if b == nil { return nil } + lb := parseBucketName(b.GetLogBucket()) return &BucketLogging{ - LogBucket: b.GetLogBucket(), + LogBucket: lb, LogObjectPrefix: b.GetLogObjectPrefix(), } } @@ -1881,24 +1854,46 @@ func toRPOFromProto(b *storagepb.Bucket) RPO { } } +func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.DataLocations} +} + +func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig { + if c == nil { + return nil + } + return &raw.BucketCustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig { + if c == nil { + return nil + } + return &storagepb.Bucket_CustomPlacementConfig{ + DataLocations: c.DataLocations, + } +} + +func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig { + if c == nil { + return nil + } + return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} +} + // Objects returns an iterator over the objects in the bucket that match the // Query q. If q is nil, no filtering is done. Objects will be iterated over // lexicographically by name. // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - it := &ObjectIterator{ - ctx: ctx, - bucket: b, - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - if q != nil { - it.query = *q - } - return it + o := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.ListObjects(ctx, b.name, q, o...) } // Retryer returns a bucket handle that is configured with custom retry @@ -1933,7 +1928,6 @@ func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle { // Note: This iterator is not safe for concurrent operations without explicit synchronization. 
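BucketHandle.Objects now delegates to the transport-agnostic ListObjects, but iteration from the caller's side is unchanged; a minimal sketch (prefix and bucket name are illustrative, and google.golang.org/api/iterator is imported):

    it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{Prefix: "logs/"})
    for {
        objAttrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(objAttrs.Name)
    }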
type ObjectIterator struct { ctx context.Context - bucket *BucketHandle query Query pageInfo *iterator.PageInfo nextFunc func() error @@ -1970,52 +1964,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) { return item, nil } -func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { - req := it.bucket.c.raw.Objects.List(it.bucket.name) - setClientHeader(req.Header()) - projection := it.query.Projection - if projection == ProjectionDefault { - projection = ProjectionFull - } - req.Projection(projection.String()) - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.StartOffset(it.query.StartOffset) - req.EndOffset(it.query.EndOffset) - req.Versions(it.query.Versions) - req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) - if len(it.query.fieldSelection) > 0 { - req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) - } - req.PageToken(pageToken) - if it.bucket.userProject != "" { - req.UserProject(it.bucket.userProject) - } - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Objects - var err error - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }, it.bucket.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - err = ErrBucketNotExist - } - return "", err - } - for _, item := range resp.Items { - it.items = append(it.items, newObject(item)) - } - for _, prefix := range resp.Prefixes { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - return resp.NextPageToken, nil -} - // Buckets returns an iterator over the buckets in the project. You may // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project @@ -2023,17 +1971,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { - it := &BucketIterator{ - ctx: ctx, - client: c, - projectID: projectID, - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it + o := makeStorageOpts(true, c.retry, "") + return c.tc.ListBuckets(ctx, projectID, o...) } // A BucketIterator is an iterator over BucketAttrs. @@ -2044,7 +1983,6 @@ type BucketIterator struct { Prefix string ctx context.Context - client *Client projectID string buckets []*BucketAttrs pageInfo *iterator.PageInfo @@ -2070,36 +2008,6 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) { // Note: This method is not safe for concurrent operations without explicit synchronization. func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } -// TODO: When the transport-agnostic client interface is integrated into the Veneer, -// this method should be removed, and the iterator should be initialized by the -// transport-specific client implementations. 
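Client.Buckets is likewise routed through the transport-agnostic ListBuckets; caller-side iteration still looks like the sketch below (project ID and prefix are placeholders).

    it := client.Buckets(ctx, "my-project-id")
    it.Prefix = "prod-" // optional bucket-name prefix filter
    for {
        battrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(battrs.Name)
    }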
-func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { - req := it.client.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) - req.Projection("full") - req.Prefix(it.Prefix) - req.PageToken(pageToken) - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Buckets - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() - return err - }, it.client.retry, true, setRetryHeaderHTTP(req)) - if err != nil { - return "", err - } - for _, item := range resp.Items { - b, err := newBucket(item) - if err != nil { - return "", err - } - it.buckets = append(it.buckets, b) - } - return resp.NextPageToken, nil -} - // RPO (Recovery Point Objective) configures the turbo replication feature. See // https://cloud.google.com/storage/docs/managing-turbo-replication for more information. type RPO int @@ -2135,3 +2043,28 @@ func (rpo RPO) String() string { return rpoUnknown } } + +// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToUTCTime(d *dpb.Date) time.Time { + return protoDateToTime(d, time.UTC) +} + +// protoDateToTime returns a new Time based on the google.type.Date and provided +// *time.Location. +// +// Hours, minutes, seconds, and nanoseconds are set to 0. +func protoDateToTime(d *dpb.Date, l *time.Location) time.Time { + return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) +} + +// timeToProtoDate returns a new google.type.Date based on the provided time.Time. +// The location is ignored, as is anything more precise than the day. +func timeToProtoDate(t time.Time) *dpb.Date { + return &dpb.Date{ + Year: int32(t.Year()), + Month: int32(t.Month()), + Day: int32(t.Day()), + } +} diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index 87ee41927d0..40eb576ce67 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -16,6 +16,8 @@ package storage import ( "context" + "io" + "time" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/option" @@ -42,7 +44,7 @@ type storageClient interface { // Top-level methods. GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) - CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) + CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator Close() error @@ -56,35 +58,35 @@ type storageClient interface { // Object metadata methods. 
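The RPO type above controls turbo replication for dual-region buckets. A hedged sketch of enabling it, assuming BucketAttrsToUpdate exposes an RPO field as in recent releases of this package:

    if _, err := client.Bucket("my-dual-region-bucket").Update(ctx, storage.BucketAttrsToUpdate{
        RPO: storage.RPOAsyncTurbo, // storage.RPODefault switches turbo replication back off
    }); err != nil {
        // TODO: Handle error.
    }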
- DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error - GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) - UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error + GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) + UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) // Default Object ACL methods. DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) - UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) + UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error // Bucket ACL methods. DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) - UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) + UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error // Object ACL methods. DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) - UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) + UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error // Media operations. ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) - OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error - OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error + NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error) + OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) // IAM methods. @@ -94,11 +96,16 @@ type storageClient interface { // HMAC Key methods. 
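The new gen and encryptionKey parameters on the object metadata methods correspond to ObjectHandle.Generation and ObjectHandle.Key on the public surface; a sketch, where the generation number is illustrative and the key bytes are a placeholder for a real 32-byte AES-256 key:

    key := make([]byte, 32) // placeholder customer-supplied AES-256 key
    obj := client.Bucket("my-bucket").Object("my-object").Generation(1234567890).Key(key)
    attrs, err := obj.Attrs(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(attrs.Generation)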
- GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) - ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator - UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) - CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) - DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error + GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) + ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator + UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) + CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) + DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error + + // Notification methods. + ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error) + CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error) + DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error } // settings contains transport-agnostic configuration for API calls made via @@ -155,6 +162,20 @@ func callSettings(defaults *settings, opts ...storageOption) *settings { return &cs } +// makeStorageOpts is a helper for generating a set of storageOption based on +// idempotency, retryConfig, and userProject. All top-level client operations +// will generally have to pass these options through the interface. +func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption { + opts := []storageOption{idempotent(isIdempotent)} + if retry != nil { + opts = append(opts, withRetryConfig(retry)) + } + if userProject != "" { + opts = append(opts, withUserProject(userProject)) + } + return opts +} + // storageOption is the transport-agnostic call option for the storageClient // interface. type storageOption interface { @@ -211,24 +232,93 @@ type userProjectOption struct { func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project } +type openWriterParams struct { + // Writer configuration + + // ctx is the context used by the writer routine to make all network calls + // and to manage the writer routine - see `Writer.ctx`. + // Required. + ctx context.Context + // chunkSize - see `Writer.ChunkSize`. + // Optional. + chunkSize int + // chunkRetryDeadline - see `Writer.ChunkRetryDeadline`. + // Optional. + chunkRetryDeadline time.Duration + + // Object/request properties + + // bucket - see `Writer.o.bucket`. + // Required. + bucket string + // attrs - see `Writer.ObjectAttrs`. + // Required. + attrs *ObjectAttrs + // conds - see `Writer.o.conds`. + // Optional. + conds *Conditions + // encryptionKey - see `Writer.o.encryptionKey` + // Optional. + encryptionKey []byte + // sendCRC32C - see `Writer.SendCRC32C`. + // Optional. + sendCRC32C bool + + // Writer callbacks + + // donec - see `Writer.donec`. + // Required. + donec chan struct{} + // setError callback for reporting errors - see `Writer.error`. + // Required. + setError func(error) + // progress callback for reporting upload progress - see `Writer.progress`. 
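The HMAC key and notification methods added to the interface sit behind the existing public helpers; a sketch of the corresponding caller-side calls (project, service account, bucket, and topic values are placeholders):

    hmacKey, err := client.CreateHMACKey(ctx, "my-project-id", "sa@my-project-id.iam.gserviceaccount.com")
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(hmacKey.AccessID)

    n, err := client.Bucket("my-bucket").AddNotification(ctx, &storage.Notification{
        TopicProjectID: "my-project-id",
        TopicID:        "my-topic",
        PayloadFormat:  storage.JSONPayload,
    })
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(n.ID)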
+ // Required. + progress func(int64) + // setObj callback for reporting the resulting object - see `Writer.obj`. + // Required. + setObj func(*ObjectAttrs) +} + +type newRangeReaderParams struct { + bucket string + conds *Conditions + encryptionKey []byte + gen int64 + length int64 + object string + offset int64 + readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. +} + type composeObjectRequest struct { dstBucket string - dstObject string - srcs []string - gen int64 - conds *Conditions + dstObject destinationObject + srcs []sourceObject predefinedACL string + sendCRC32C bool } -type rewriteObjectRequest struct { - srcBucket string - srcObject string - dstBucket string - dstObject string - dstKeyName string - attrs *ObjectAttrs +type sourceObject struct { + name string + bucket string gen int64 conds *Conditions + encryptionKey []byte +} + +type destinationObject struct { + name string + bucket string + conds *Conditions + attrs *ObjectAttrs // attrs to set on the destination object. + encryptionKey []byte + keyName string +} + +type rewriteObjectRequest struct { + srcObject sourceObject + dstObject destinationObject predefinedACL string token string } @@ -237,5 +327,6 @@ type rewriteObjectResponse struct { resource *ObjectAttrs done bool written int64 + size int64 token string } diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 26865fa4714..60ed81391e6 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -20,7 +20,6 @@ import ( "fmt" "cloud.google.com/go/internal/trace" - raw "google.golang.org/api/storage/v1" ) // CopierFrom creates a Copier that can copy src to dst. @@ -86,69 +85,57 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen) + } // Convert destination attributes to raw form, omitting the bucket. // If the bucket is included but name or content-type aren't, the service // returns a 400 with "Required" as the only message. Omitting the bucket // does not cause any problems. - rawObject := c.ObjectAttrs.toRawObject("") + req := &rewriteObjectRequest{ + srcObject: sourceObject{ + name: c.src.object, + bucket: c.src.bucket, + gen: c.src.gen, + conds: c.src.conds, + encryptionKey: c.src.encryptionKey, + }, + dstObject: destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, + keyName: c.DestinationKMSKeyName, + }, + predefinedACL: c.PredefinedACL, + token: c.RewriteToken, + } + + isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) + var userProject string + if c.dst.userProject != "" { + userProject = c.dst.userProject + } else if c.src.userProject != "" { + userProject = c.src.userProject + } + opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject) + for { - res, err := c.callRewrite(ctx, rawObject) + res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...) 
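Copier.Run now builds a rewriteObjectRequest and hands it to the transport-agnostic RewriteObject; from the caller's perspective the Copier API is unchanged, as in this sketch (bucket and object names are illustrative):

    src := client.Bucket("src-bucket").Object("src-object")
    dst := client.Bucket("dst-bucket").Object("dst-object")
    copier := dst.CopierFrom(src)
    copier.ContentType = "text/plain" // optional attributes to set on the destination
    attrs, err := copier.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(attrs.Name)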
if err != nil { return nil, err } + c.RewriteToken = res.token if c.ProgressFunc != nil { - c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + c.ProgressFunc(uint64(res.written), uint64(res.size)) } - if res.Done { // Finished successfully. - return newObject(res.Resource), nil + if res.done { // Finished successfully. + return res.resource, nil } } } -func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { - call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) - - call.Context(ctx).Projection("full") - if c.RewriteToken != "" { - call.RewriteToken(c.RewriteToken) - } - if c.DestinationKMSKeyName != "" { - call.DestinationKmsKeyName(c.DestinationKMSKeyName) - } - if c.PredefinedACL != "" { - call.DestinationPredefinedAcl(c.PredefinedACL) - } - if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err - } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) - } else if c.src.userProject != "" { - call.UserProject(c.src.userProject) - } - if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err - } - if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { - return nil, err - } - var res *raw.RewriteResponse - var err error - setClientHeader(call.Header()) - - retryCall := func() error { res, err = call.Do(); return err } - isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) - - if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - c.RewriteToken = res.RewriteToken - return res, nil -} - // ComposerFrom creates a Composer that can compose srcs into dst. // You can immediately call Run on the returned Composer, or you can // configure it first. @@ -188,17 +175,13 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if err := c.dst.validate(); err != nil { return nil, err } + if c.dst.gen != defaultGen { + return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen) + } if len(c.srcs) == 0 { return nil, errors.New("storage: at least one source object must be specified") } - req := &raw.ComposeRequest{} - // Compose requires a non-empty Destination, so we always set it, - // even if the caller-provided ObjectAttrs is the zero value. 
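Composer.Run is refactored the same way, assembling a composeObjectRequest for the transport-agnostic ComposeObject; a caller-side sketch with placeholder object names:

    bkt := client.Bucket("my-bucket")
    dst := bkt.Object("combined")
    composer := dst.ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2"))
    attrs, err := composer.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(attrs.Size)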
- req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) - if c.SendCRC32C { - req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C) - } for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err @@ -209,36 +192,31 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if src.encryptionKey != nil { return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) } - srcObj := &raw.ComposeRequestSourceObjects{ - Name: src.object, - } - if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { - return nil, err - } - req.SourceObjects = append(req.SourceObjects, srcObj) } - call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) - if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { - return nil, err + req := &composeObjectRequest{ + dstBucket: c.dst.bucket, + predefinedACL: c.PredefinedACL, + sendCRC32C: c.SendCRC32C, } - if c.dst.userProject != "" { - call.UserProject(c.dst.userProject) - } - if c.PredefinedACL != "" { - call.DestinationPredefinedAcl(c.PredefinedACL) + req.dstObject = destinationObject{ + name: c.dst.object, + bucket: c.dst.bucket, + conds: c.dst.conds, + attrs: &c.ObjectAttrs, + encryptionKey: c.dst.encryptionKey, } - if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { - return nil, err + for _, src := range c.srcs { + s := sourceObject{ + name: src.object, + bucket: src.bucket, + gen: src.gen, + conds: src.conds, + } + req.srcs = append(req.srcs, s) } - var obj *raw.Object - setClientHeader(call.Header()) - retryCall := func() error { obj, err = call.Do(); return err } isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) - - if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - return newObject(obj), nil + opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject) + return c.dst.c.tc.ComposeObject(ctx, req, opts...) } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index f4654b32c3e..8bf3098431e 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -22,185 +22,183 @@ https://cloud.google.com/storage/docs. See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts, connection pooling and similar aspects of this package. +# Creating a Client -Creating a Client +To start working with this package, create a [Client]: -To start working with this package, create a client: - - ctx := context.Background() - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } The client will use your default application credentials. Clients should be -reused instead of created as needed. The methods of Client are safe for +reused instead of created as needed. The methods of [Client] are safe for concurrent use by multiple goroutines. 
If you only wish to access public data, you can create an unauthenticated client with - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST environment variable to the address at which your emulator is running. This will send requests to that address instead of to Cloud Storage. You can then create and use a client as usual: - // Set STORAGE_EMULATOR_HOST environment variable. - err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") - if err != nil { - // TODO: Handle error. - } + // Set STORAGE_EMULATOR_HOST environment variable. + err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") + if err != nil { + // TODO: Handle error. + } - // Create client as usual. - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } + // Create client as usual. + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } - // This request is now directed to http://localhost:9000/storage/v1/b - // instead of https://storage.googleapis.com/storage/v1/b - if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } + // This request is now directed to http://localhost:9000/storage/v1/b + // instead of https://storage.googleapis.com/storage/v1/b + if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } Please note that there is no official emulator for Cloud Storage. -Buckets +# Buckets A Google Cloud Storage bucket is a collection of objects. To work with a bucket, make a bucket handle: - bkt := client.Bucket(bucketName) + bkt := client.Bucket(bucketName) A handle is a reference to a bucket. You can have a handle even if the bucket doesn't exist yet. To create a bucket in Google Cloud Storage, -call Create on the handle: +call [BucketHandle.Create]: - if err := bkt.Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } Note that although buckets are associated with projects, bucket names are global across all projects. Each bucket has associated metadata, represented in this package by -BucketAttrs. The third argument to BucketHandle.Create allows you to set -the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use -Attrs: +[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set +the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use +[BucketHandle.Attrs]: - attrs, err := bkt.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", - attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) -Objects +# Objects An object holds arbitrary data as a sequence of bytes, like a file. You refer to objects using a handle, just as with buckets, but unlike buckets you don't explicitly create an object. Instead, the first time you write -to an object it will be created. You can use the standard Go io.Reader -and io.Writer interfaces to read and write object data: - - obj := bkt.Object("data") - // Write something to obj. 
- // w implements io.Writer. - w := obj.NewWriter(ctx) - // Write some text to obj. This will either create the object or overwrite whatever is there already. - if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { - // TODO: Handle error. - } - // Close, just like writing a file. - if err := w.Close(); err != nil { - // TODO: Handle error. - } - - // Read it back. - r, err := obj.NewReader(ctx) - if err != nil { - // TODO: Handle error. - } - defer r.Close() - if _, err := io.Copy(os.Stdout, r); err != nil { - // TODO: Handle error. - } - // Prints "This object contains text." - -Objects also have attributes, which you can fetch with Attrs: - - objAttrs, err := obj.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("object %s has size %d and can be read using %s\n", - objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) - -Listing objects - -Listing objects in a bucket is done with the Bucket.Objects method: - - query := &storage.Query{Prefix: ""} - - var names []string - it := bkt.Objects(ctx, query) - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - log.Fatal(err) - } - names = append(names, attrs.Name) - } +to an object it will be created. You can use the standard Go [io.Reader] +and [io.Writer] interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +# Listing objects + +Listing objects in a bucket is done with the [BucketHandle.Objects] method: + + query := &storage.Query{Prefix: ""} + + var names []string + it := bkt.Objects(ctx, query) + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + names = append(names, attrs.Name) + } Objects are listed lexicographically by name. To filter objects -lexicographically, Query.StartOffset and/or Query.EndOffset can be used: +lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: - query := &storage.Query{ - Prefix: "", - StartOffset: "bar/", // Only list objects lexicographically >= "bar/" - EndOffset: "foo/", // Only list objects lexicographically < "foo/" - } + query := &storage.Query{ + Prefix: "", + StartOffset: "bar/", // Only list objects lexicographically >= "bar/" + EndOffset: "foo/", // Only list objects lexicographically < "foo/" + } - // ... as before + // ... 
as before If only a subset of object attributes is needed when listing, specifying this -subset using Query.SetAttrSelection may speed up the listing process: +subset using [Query.SetAttrSelection] may speed up the listing process: - query := &storage.Query{Prefix: ""} - query.SetAttrSelection([]string{"Name"}) + query := &storage.Query{Prefix: ""} + query.SetAttrSelection([]string{"Name"}) - // ... as before + // ... as before -ACLs +# ACLs Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of ACLRules, each of which specifies the role of a user, group or project. ACLs are suitable for fine-grained control, but you may prefer using IAM to control -access at the project level (see -https://cloud.google.com/storage/docs/access-control/iam). +access at the project level (see [Cloud Storage IAM docs]. -To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: +To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: - acls, err := obj.ACL().List(ctx) - if err != nil { - // TODO: Handle error. - } - for _, rule := range acls { - fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) - } + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } You can also set and delete ACLs. -Conditions +# Conditions Every object has a generation and a metageneration. The generation changes whenever the content changes, and the metageneration changes whenever the -metadata changes. Conditions let you check these values before an operation; +metadata changes. [Conditions] let you check these values before an operation; the operation only executes if the conditions match. You can use conditions to prevent race conditions in read-modify-write operations. @@ -208,60 +206,81 @@ For example, say you've read an object's metadata into objAttrs. Now you want to write to that object, but only if its contents haven't changed since you read it. Here is how to express that: - w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) - // Proceed with writing as above. + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. -Signed URLs +# Signed URLs You can obtain a URL that lets anyone read or write an object for a limited time. Signing a URL requires credentials authorized to sign a URL. To use the same -authentication that was used when instantiating the Storage client, use the -BucketHandle.SignedURL method. +authentication that was used when instantiating the Storage client, use +[BucketHandle.SignedURL]. - url, err := client.Bucket(bucketName).SignedURL(objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) + url, err := client.Bucket(bucketName).SignedURL(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) -You can also sign a URL wihout creating a client. See the documentation of -SignedURL for details. +You can also sign a URL without creating a client. See the documentation of +[SignedURL] for details. - url, err := storage.SignedURL(bucketName, "shared-object", opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(url) -Post Policy V4 Signed Request +# Post Policy V4 Signed Request A type of signed request that allows uploads through HTML forms directly to Cloud Storage with temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised by a user. -For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well -as the documentation of BucketHandle.GenerateSignedPostPolicyV4. +For more information, please see the [XML POST Object docs] as well +as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. - pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + +# Credential requirements for signing + +If the GoogleAccessID and PrivateKey option fields are not provided, they will +be automatically detected by [BucketHandle.SignedURL] and +[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: + - you are authenticated to the Storage Client with a service account's + downloaded private key, either directly in code or by setting the + GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), + - your application is running on Google Compute Engine (GCE), or + - you are logged into [gcloud using application default credentials] + with [impersonation enabled]. + +Detecting GoogleAccessID may not be possible if you are authenticated using a +token source or using [option.WithHTTPClient]. In this case, you can provide a +service account email for GoogleAccessID and the client will attempt to sign +the URL or Post Policy using that service account. -Errors +To generate the signature, you must have: + - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service + account, and + - the [IAM Service Account Credentials API] enabled (unless authenticating + with a downloaded private key). -Errors returned by this client are often of the type googleapi.Error. -These errors can be introspected for more information by using errors.As -with the richer googleapi.Error type. For example: +# Errors + +Errors returned by this client are often of the type [googleapi.Error]. +These errors can be introspected for more information by using [errors.As] +with the richer [googleapi.Error] type. For example: var e *googleapi.Error if ok := errors.As(err, &e); ok { if e.Code == 409 { ... } } -See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information. - -Retrying failed requests +# Retrying failed requests Methods in this package may retry calls that fail with transient errors. Retrying continues indefinitely unless the controlling context is canceled, the @@ -271,12 +290,12 @@ continuing, use context timeouts or cancellation. The retry strategy in this library follows best practices for Cloud Storage. By default, operations are retried only if they are idempotent, and exponential backoff with jitter is employed. In addition, errors are only retried if they -are defined as transient by the service. See -https://cloud.google.com/storage/docs/retry-strategy for more information. +are defined as transient by the service. See the [Cloud Storage retry docs] +for more information. 
Users can configure non-default retry behavior for a single library call (using -BucketHandle.Retryer and ObjectHandle.Retryer) or for all calls made by a -client (using Client.SetRetry). For example: +[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a +client (using [Client.SetRetry]). For example: o := client.Bucket(bucket).Object(object).Retryer( // Use WithBackoff to change the timing of the exponential backoff. @@ -297,5 +316,13 @@ client (using Client.SetRetry). For example: if err := o.Delete(ctx); err != nil { // Handle err. } + +[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam +[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object +[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy +[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth +[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login +[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account +[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index db914d61f32..049fbae2f8e 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -16,17 +16,24 @@ package storage import ( "context" + "encoding/base64" + "fmt" + "io" + "net/url" "os" + "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/iterator" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" iampb "google.golang.org/genproto/googleapis/iam/v1" - storagepb "google.golang.org/genproto/googleapis/storage/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) @@ -36,19 +43,28 @@ const ( // connection pool may be necessary for jobs that require // high throughput and/or leverage many concurrent streams. // - // This is an experimental API and not intended for public use. + // This is only used for the gRPC client. defaultConnPoolSize = 4 + // maxPerMessageWriteSize is the maximum amount of content that can be sent + // per WriteObjectRequest message. A buffer reaching this amount will + // precipitate a flush of the buffer. It is only used by the gRPC Writer + // implementation. + maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) + // globalProjectAlias is the project ID alias used for global buckets. // // This is only used for the gRPC API. globalProjectAlias = "_" + + // msgEntityNotSupported indicates ACL entites using project ID are not currently supported. + // + // This is only used for the gRPC API. + msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead" ) // defaultGRPCOptions returns a set of the default client options // for gRPC client initialization. -// -// This is an experimental API and not intended for public use. 
func defaultGRPCOptions() []option.ClientOption { defaults := []option.ClientOption{ option.WithGRPCConnectionPool(defaultConnPoolSize), @@ -73,6 +89,9 @@ func defaultGRPCOptions() []option.ClientOption { option.WithGRPCDialOption(grpc.WithInsecure()), option.WithoutAuthentication(), ) + } else { + // Only enable DirectPath when the emulator is not being targeted. + defaults = append(defaults, internaloption.EnableDirectPath(true)) } return defaults @@ -80,8 +99,6 @@ func defaultGRPCOptions() []option.ClientOption { // grpcStorageClient is the gRPC API implementation of the transport-agnostic // storageClient interface. -// -// This is an experimental API and not intended for public use. type grpcStorageClient struct { raw *gapic.Client settings *settings @@ -89,8 +106,6 @@ type grpcStorageClient struct { // newGRPCStorageClient initializes a new storageClient that uses the gRPC // Storage API. -// -// This is an experimental API and not intended for public use. func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) s.clientOption = append(defaultGRPCOptions(), s.clientOption...) @@ -129,10 +144,10 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin return resp.EmailAddress, err } -func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { +func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) b := attrs.toProtoBucket() - + b.Name = bucket // If there is lifecycle information but no location, explicitly set // the location. This is a GCS quirk/bug. if b.GetLocation() == "" && b.GetLifecycle() != nil { @@ -140,11 +155,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, at } req := &storagepb.CreateBucketRequest{ - Parent: toProjectResource(project), - Bucket: b, - BucketId: b.GetName(), - PredefinedAcl: attrs.PredefinedACL, - PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL, + Parent: toProjectResource(project), + Bucket: b, + BucketId: b.GetName(), + } + if attrs != nil { + req.PredefinedAcl = attrs.PredefinedACL + req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL } var battrs *BucketAttrs @@ -311,6 +328,8 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat // In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared. fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. if uattrs.acl != nil { // In cases where acl is set by UpdateBucketACL method. fieldMask.Paths = append(fieldMask.Paths, "acl") @@ -404,14 +423,123 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q // Object metadata methods. -func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error { - return errMethodNotSupported +func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) 
+ req := &storagepb.DeleteObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + } + if err := applyCondsProto("grpcStorageClient.DeleteObject", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + err := run(ctx, func() error { + return c.raw.DeleteObject(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + return err } -func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - return nil, errMethodNotSupported + +func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetObjectRequest{ + Bucket: bucketResourceName(globalProjectAlias, bucket), + Object: object, + } + if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + } + + var attrs *ObjectAttrs + err := run(ctx, func() error { + res, err := c.raw.GetObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err } -func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - return nil, errMethodNotSupported + +func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) 
+ o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object) + req := &storagepb.UpdateObjectRequest{ + Object: o, + PredefinedAcl: uattrs.PredefinedACL, + } + if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if encryptionKey != nil { + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) + } + + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if uattrs.EventBasedHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") + } + if uattrs.TemporaryHold != nil { + fieldMask.Paths = append(fieldMask.Paths, "temporary_hold") + } + if uattrs.ContentType != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_type") + } + if uattrs.ContentLanguage != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_language") + } + if uattrs.ContentEncoding != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_encoding") + } + if uattrs.ContentDisposition != nil { + fieldMask.Paths = append(fieldMask.Paths, "content_disposition") + } + if uattrs.CacheControl != nil { + fieldMask.Paths = append(fieldMask.Paths, "cache_control") + } + if !uattrs.CustomTime.IsZero() { + fieldMask.Paths = append(fieldMask.Paths, "custom_time") + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + if uattrs.ACL != nil { + fieldMask.Paths = append(fieldMask.Paths, "acl") + } + // TODO(cathyo): Handle metadata. Pending b/230510191. + + req.UpdateMask = fieldMask + + var attrs *ObjectAttrs + err := run(ctx, func() error { + res, err := c.raw.UpdateObject(ctx, req, s.gax...) + attrs = newObjectFromProto(res) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { + return nil, ErrObjectNotExist + } + + return attrs, err } // Default Object ACL methods. @@ -424,11 +552,21 @@ func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s return err } // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true var acl []ACLRule for _, a := range attrs.DefaultObjectACL { if a.Entity != entity { acl = append(acl, a) } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported) } uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} // Call UpdateBucket with a MetagenerationMatch precondition set. @@ -437,6 +575,7 @@ func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s } return nil } + func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { attrs, err := c.GetBucket(ctx, bucket, nil, opts...) 
if err != nil { @@ -444,8 +583,25 @@ func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st } return attrs.DefaultObjectACL, nil } -func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) { - return nil, errMethodNotSupported + +func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve BucketAttrs. + attrs, err := c.GetBucket(ctx, bucket, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.DefaultObjectACL, aclRule) + uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} + // Call UpdateBucket with a MetagenerationMatch precondition set. + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err + } + return nil } // Bucket ACL methods. @@ -458,11 +614,21 @@ func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, return err } // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true var acl []ACLRule for _, a := range attrs.ACL { if a.Entity != entity { acl = append(acl, a) } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) } uattrs := &BucketAttrsToUpdate{acl: acl} // Call UpdateBucket with a MetagenerationMatch precondition set. @@ -471,6 +637,7 @@ func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, } return nil } + func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { attrs, err := c.GetBucket(ctx, bucket, nil, opts...) if err != nil { @@ -479,51 +646,389 @@ func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, o return attrs.ACL, nil } -func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) { +func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { // There is no separate API for PATCH in gRPC. // Make a GET call first to retrieve BucketAttrs. attrs, err := c.GetBucket(ctx, bucket, nil, opts...) if err != nil { - return nil, err + return err } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. var acl []ACLRule aclRule := ACLRule{Entity: entity, Role: role} acl = append(attrs.ACL, aclRule) uattrs := &BucketAttrsToUpdate{acl: acl} // Call UpdateBucket with a MetagenerationMatch precondition set. - _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...) 
- if err != nil { - return nil, err + if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { + return err } - return &aclRule, err + return nil } // Object ACL methods. func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { - return errMethodNotSupported + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return err + } + // Delete the entity and copy other remaining ACL entities. + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + // Return error if entity is not found or a project ID is used. + invalidEntity := true + var acl []ACLRule + for _, a := range attrs.ACL { + if a.Entity != entity { + acl = append(acl, a) + } + if a.Entity == entity { + invalidEntity = false + } + } + if invalidEntity { + return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) + } + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { + return err + } + return nil } + +// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - return nil, errMethodNotSupported + o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return nil, err + } + return o.ACL, nil } -func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) { - return nil, errMethodNotSupported + +func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + // There is no separate API for PATCH in gRPC. + // Make a GET call first to retrieve ObjectAttrs. + attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) + if err != nil { + return err + } + // Note: This API currently does not support entites using project ID. + // Use project numbers in ACL entities. Pending b/233617896. + var acl []ACLRule + aclRule := ACLRule{Entity: entity, Role: role} + acl = append(attrs.ACL, aclRule) + uattrs := &ObjectAttrsToUpdate{ACL: acl} + // Call UpdateObject with the specified metageneration. + if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil { + return err + } + return nil } // Media operations. func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { - return nil, errMethodNotSupported + s := callSettings(c.settings, opts...) 
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) + dstObjPb.Name = req.dstObject.name + if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil { + return nil, err + } + if req.sendCRC32C { + dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C + } + + srcs := []*storagepb.ComposeObjectRequest_SourceObject{} + for _, src := range req.srcs { + srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name} + if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil { + return nil, err + } + srcs = append(srcs, srcObjPb) + } + + rawReq := &storagepb.ComposeObjectRequest{ + Destination: dstObjPb, + SourceObjects: srcs, + } + if req.predefinedACL != "" { + rawReq.DestinationPredefinedAcl = req.predefinedACL + } + if req.dstObject.encryptionKey != nil { + rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + + var obj *storagepb.Object + var err error + if err := run(ctx, func() error { + obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + return nil, err + } + + return newObjectFromProto(obj), nil } func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { - return nil, errMethodNotSupported + s := callSettings(c.settings, opts...) + obj := req.dstObject.attrs.toProtoObject("") + call := &storagepb.RewriteObjectRequest{ + SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), + SourceObject: req.srcObject.name, + RewriteToken: req.token, + DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), + DestinationName: req.dstObject.name, + Destination: obj, + DestinationKmsKey: req.dstObject.keyName, + DestinationPredefinedAcl: req.predefinedACL, + } + + // The userProject, whether source or destination project, is decided by the code calling the interface. 
+ if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + + if len(req.dstObject.encryptionKey) > 0 { + call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) + } + if len(req.srcObject.encryptionKey) > 0 { + srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey) + call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm() + call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() + call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() + } + var res *storagepb.RewriteResponse + var err error + + retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.GetDone(), + written: res.GetTotalBytesRewritten(), + size: res.GetObjectSize(), + token: res.GetRewriteToken(), + resource: newObjectFromProto(res.GetResource()), + } + + return r, nil } -func (c *grpcStorageClient) OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error { - return errMethodNotSupported +func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + + // A negative length means "read to the end of the object", but the + // read_limit field it corresponds to uses zero to mean the same thing. Thus + // we coerce the length to 0 to read to the end of the object. + if params.length < 0 { + params.length = 0 + } + + b := bucketResourceName(globalProjectAlias, params.bucket) + // TODO(noahdietz): Use encryptionKey to set relevant request fields. + req := &storagepb.ReadObjectRequest{ + Bucket: b, + Object: params.object, + } + // The default is a negative value, which means latest. + if params.gen >= 0 { + req.Generation = params.gen + } + + // Define a function that initiates a Read with offset and length, assuming + // we have already read seen bytes. + reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { + // If the context has already expired, return immediately without making + // we call. + if err := ctx.Err(); err != nil { + return nil, nil, err + } + + cc, cancel := context.WithCancel(ctx) + + start := params.offset + seen + // Only set a ReadLimit if length is greater than zero, because zero + // means read it all. + if params.length > 0 { + req.ReadLimit = params.length - seen + } + req.ReadOffset = start + + if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { + cancel() + return nil, nil, err + } + + var stream storagepb.Storage_ReadObjectClient + var msg *storagepb.ReadObjectResponse + var err error + + err = run(cc, func() error { + stream, err = c.raw.ReadObject(cc, req, s.gax...) 
+ if err != nil { + return err + } + + msg, err = stream.Recv() + // These types of errors show up on the Recv call, rather than the + // initialization of the stream via ReadObject above. + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + return ErrObjectNotExist + } + + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + // Close the stream context we just created to ensure we don't leak + // resources. + cancel() + return nil, nil, err + } + + return &readStreamResponse{stream, msg}, cancel, nil + } + + res, cancel, err := reopen(0) + if err != nil { + return nil, err + } + + // The first message was Recv'd on stream open, use it to populate the + // object metadata. + msg := res.response + obj := msg.GetMetadata() + // This is the size of the entire object, even if only a range was requested. + size := obj.GetSize() + + r = &Reader{ + Attrs: ReaderObjectAttrs{ + Size: size, + ContentType: obj.GetContentType(), + ContentEncoding: obj.GetContentEncoding(), + CacheControl: obj.GetCacheControl(), + LastModified: obj.GetUpdateTime().AsTime(), + Metageneration: obj.GetMetageneration(), + Generation: obj.GetGeneration(), + }, + reader: &gRPCReader{ + stream: res.stream, + reopen: reopen, + cancel: cancel, + size: size, + // Store the content from the first Recv in the + // client buffer for reading later. + leftovers: msg.GetChecksummedData().GetContent(), + }, + } + + cr := msg.GetContentRange() + if cr != nil { + r.Attrs.StartOffset = cr.GetStart() + r.remain = cr.GetEnd() - cr.GetStart() + 1 + } else { + r.remain = size + } + + // Only support checksums when reading an entire object, not a range. + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length == 0 { + r.wantCRC = checksums.GetCrc32C() + r.checkCRC = true + } + + return r, nil } -func (c *grpcStorageClient) OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error { - return errMethodNotSupported + +func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + var offset int64 + errorf := params.setError + progress := params.progress + setObj := params.setObj + + pr, pw := io.Pipe() + gw := newGRPCWriter(c, params, pr) + + // This function reads the data sent to the pipe and sends sets of messages + // on the gRPC client-stream as the buffer is filled. + go func() { + defer close(params.donec) + + // Loop until there is an error or the Object has been finalized. + for { + // Note: This blocks until either the buffer is full or EOF is read. + recvd, doneReading, err := gw.read() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + + // TODO(noahdietz): Send encryption key via CommonObjectRequestParams. + + // The chunk buffer is full, but there is no end in sight. This + // means that a resumable upload will need to be used to send + // multiple chunks, until we are done reading data. Start a + // resumable upload if it has not already been started. + // Otherwise, all data will be sent over a single gRPC stream. 
+ if !doneReading && gw.upid == "" { + err = gw.startResumableUpload() + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + } + + o, off, finalized, err := gw.uploadBuffer(recvd, offset, doneReading) + if err != nil { + err = checkCanceled(err) + errorf(err) + pr.CloseWithError(err) + return + } + // At this point, the current buffer has been uploaded. Capture the + // committed offset here in case the upload was not finalized and + // another chunk is to be uploaded. + offset = off + progress(offset) + + // When we are done reading data and the chunk has been finalized, + // we are done. + if doneReading && finalized { + // Build Object from server's response. + setObj(newObjectFromProto(o)) + return + } + } + }() + + return pw, nil } // IAM methods. @@ -583,20 +1088,211 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str // HMAC Key methods. -func (c *grpcStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) { - return nil, errMethodNotSupported +func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.GetHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func() error { + var err error + metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) + req := &storagepb.ListHmacKeysRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + ShowDeletedKeys: showDeletedKeys, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + it := &HMACKeysIterator{ + ctx: ctx, + projectID: project, + retry: s.retry, + } + gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) + fetch := func(pageSize int, pageToken string) (token string, err error) { + var hmacKeys []*storagepb.HmacKeyMetadata + err = run(it.ctx, func() error { + hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return "", err + } + for _, hkmd := range hmacKeys { + hk := toHMACKeyFromProto(hkmd) + it.hmacKeys = append(it.hmacKeys, hk) + } + + return token, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) 
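	// State is the only mutable attribute handled here, so the update mask
	// built below carries at most the "state" path; the caller-supplied Etag
	// is sent on the HmacKeyMetadata itself.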
+ hk := &storagepb.HmacKeyMetadata{ + AccessId: accessID, + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + State: string(attrs.State), + Etag: attrs.Etag, + } + var paths []string + fieldMask := &fieldmaskpb.FieldMask{ + Paths: paths, + } + if attrs.State != "" { + fieldMask.Paths = append(fieldMask.Paths, "state") + } + req := &storagepb.UpdateHmacKeyRequest{ + HmacKey: hk, + UpdateMask: fieldMask, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var metadata *storagepb.HmacKeyMetadata + err := run(ctx, func() error { + var err error + metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toHMACKeyFromProto(metadata), nil +} + +func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + req := &storagepb.CreateHmacKeyRequest{ + Project: toProjectResource(project), + ServiceAccountEmail: serviceAccountEmail, + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + var res *storagepb.CreateHmacKeyResponse + err := run(ctx, func() error { + var err error + res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + key := toHMACKeyFromProto(res.Metadata) + key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) + + return key, nil } -func (c *grpcStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator { - return &HMACKeysIterator{} + +func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteHmacKeyRequest{ + AccessId: accessID, + Project: toProjectResource(project), + } + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + return run(ctx, func() error { + return c.raw.DeleteHmacKey(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) } -func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - return nil, errMethodNotSupported + +// Notification methods. + +func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + if s.userProject != "" { + ctx = setUserProjectMetadata(ctx, s.userProject) + } + req := &storagepb.ListNotificationsRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + } + var notifications []*storagepb.Notification + err = run(ctx, func() error { + gitr := c.raw.ListNotifications(ctx, req, s.gax...) + for { + // PageSize is not set and fallbacks to the API default pageSize of 100. + items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) + if err != nil { + return err + } + notifications = append(notifications, items...) + // If there are no more results, nextPageToken is empty and err is nil. 
+ if nextPageToken == "" { + return err + } + req.PageToken = nextPageToken + } + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + + return notificationsToMapFromProto(notifications), nil } -func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) { - return nil, errMethodNotSupported + +func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.CreateNotificationRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + Notification: toProtoNotification(n), + } + var pbn *storagepb.Notification + err = run(ctx, func() error { + var err error + pbn, err = c.raw.CreateNotification(ctx, req, s.gax...) + return err + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + if err != nil { + return nil, err + } + return toNotificationFromProto(pbn), err } -func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error { - return errMethodNotSupported + +func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + req := &storagepb.DeleteNotificationRequest{Name: id} + return run(ctx, func() error { + return c.raw.DeleteNotification(ctx, req, s.gax...) + }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) } // setUserProjectMetadata appends a project ID to the outgoing Context metadata @@ -607,3 +1303,397 @@ func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc func setUserProjectMetadata(ctx context.Context, project string) context.Context { return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project) } + +type readStreamResponse struct { + stream storagepb.Storage_ReadObjectClient + response *storagepb.ReadObjectResponse +} + +type gRPCReader struct { + seen, size int64 + stream storagepb.Storage_ReadObjectClient + reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) + leftovers []byte + cancel context.CancelFunc +} + +// Read reads bytes into the user's buffer from an open gRPC stream. +func (r *gRPCReader) Read(p []byte) (int, error) { + // No stream to read from, either never initiliazed or Close was called. + // Note: There is a potential concurrency issue if multiple routines are + // using the same reader. One encounters an error and the stream is closed + // and then reopened while the other routine attempts to read from it. + if r.stream == nil { + return 0, fmt.Errorf("reader has been closed") + } + + // The entire object has been read by this reader, return EOF. + if r.size != 0 && r.size == r.seen { + return 0, io.EOF + } + + var n int + // Read leftovers and return what was available to conform to the Reader + // interface: https://pkg.go.dev/io#Reader. + if len(r.leftovers) > 0 { + n = copy(p, r.leftovers) + r.seen += int64(n) + r.leftovers = r.leftovers[n:] + return n, nil + } + + // Attempt to Recv the next message on the stream. 
+ msg, err := r.recv() + if err != nil { + return 0, err + } + + // TODO: Determine if we need to capture incremental CRC32C for this + // chunk. The Object CRC32C checksum is captured when directed to read + // the entire Object. If directed to read a range, we may need to + // calculate the range's checksum for verification if the checksum is + // present in the response here. + // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. + content := msg.GetChecksummedData().GetContent() + n = copy(p[n:], content) + leftover := len(content) - n + if leftover > 0 { + // Wasn't able to copy all of the data in the message, store for + // future Read calls. + r.leftovers = content[n:] + } + r.seen += int64(n) + + return n, nil +} + +// Close cancels the read stream's context in order for it to be closed and +// collected. +func (r *gRPCReader) Close() error { + if r.cancel != nil { + r.cancel() + } + r.stream = nil + return nil +} + +// recv attempts to Recv the next message on the stream. In the event +// that a retryable error is encountered, the stream will be closed, reopened, +// and Recv again. This will attempt to Recv until one of the following is true: +// +// * Recv is successful +// * A non-retryable error is encountered +// * The Reader's context is canceled +// +// The last error received is the one that is returned, which could be from +// an attempt to reopen the stream. +func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { + msg, err := r.stream.Recv() + if err != nil && ShouldRetry(err) { + // This will "close" the existing stream and immediately attempt to + // reopen the stream, but will backoff if further attempts are necessary. + // Reopening the stream Recvs the first message, so if retrying is + // successful, the next logical chunk will be returned. + msg, err = r.reopenStream() + } + + return msg, err +} + +// reopenStream "closes" the existing stream and attempts to reopen a stream and +// sets the Reader's stream and cancelStream properties in the process. +func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { + // Close existing stream and initialize new stream with updated offset. + r.Close() + + res, cancel, err := r.reopen(r.seen) + if err != nil { + return nil, err + } + r.stream = res.stream + r.cancel = cancel + return res.response, nil +} + +func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { + size := params.chunkSize + if params.chunkSize == 0 { + // TODO: Should we actually use the minimum of 256 KB here when the user + // indicates they want minimal memory usage? We cannot do a zero-copy, + // bufferless upload like HTTP/JSON can. + // TODO: We need to determine if we can avoid starting a + // resumable upload when the user *plans* to send more than bufSize but + // with a bufferless upload. + size = maxPerMessageWriteSize + } + + return &gRPCWriter{ + buf: make([]byte, size), + c: c, + ctx: params.ctx, + reader: r, + bucket: params.bucket, + attrs: params.attrs, + conds: params.conds, + encryptionKey: params.encryptionKey, + sendCRC32C: params.sendCRC32C, + } +} + +// gRPCWriter is a wrapper around the the gRPC client-stream API that manages +// sending chunks of data provided by the user over the stream. 
+type gRPCWriter struct { + c *grpcStorageClient + buf []byte + reader io.Reader + + ctx context.Context + + bucket string + attrs *ObjectAttrs + conds *Conditions + encryptionKey []byte + + sendCRC32C bool + + // The gRPC client-stream used for sending buffers. + stream storagepb.Storage_WriteObjectClient + + // The Resumable Upload ID started by a gRPC-based Writer. + upid string +} + +// startResumableUpload initializes a Resumable Upload with gRPC and sets the +// upload ID on the Writer. +func (w *gRPCWriter) startResumableUpload() error { + spec, err := w.writeObjectSpec() + if err != nil { + return err + } + upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{ + WriteObjectSpec: spec, + }) + + w.upid = upres.GetUploadId() + return err +} + +// queryProgress is a helper that queries the status of the resumable upload +// associated with the given upload ID. +func (w *gRPCWriter) queryProgress() (int64, error) { + q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid}) + + // q.GetCommittedSize() will return 0 if q is nil. + return q.GetPersistedSize(), err +} + +// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if +// uploading a chunk for a resumable uploadBuffer), and will mark the write as +// finished if we are done receiving data from the user. The resulting write +// offset after uploading the buffer is returned, as well as a boolean +// indicating if the Object has been finalized. If it has been finalized, the +// final Object will be returned as well. Finalizing the upload is primarily +// important for Resumable Uploads. A simple or multi-part upload will always +// be finalized once the entire buffer has been written. +func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { + var err error + var finishWrite bool + var sent, limit int = 0, maxPerMessageWriteSize + offset := start + toWrite := w.buf[:recvd] + for { + first := sent == 0 + // This indicates that this is the last message and the remaining + // data fits in one message. + belowLimit := recvd-sent <= limit + if belowLimit { + limit = recvd - sent + } + if belowLimit && doneReading { + finishWrite = true + } + + // Prepare chunk section for upload. + data := toWrite[sent : sent+limit] + req := &storagepb.WriteObjectRequest{ + Data: &storagepb.WriteObjectRequest_ChecksummedData{ + ChecksummedData: &storagepb.ChecksummedData{ + Content: data, + }, + }, + WriteOffset: offset, + FinishWrite: finishWrite, + } + + // Open a new stream and set the first_message field on the request. + // The first message on the WriteObject stream must either be the + // Object or the Resumable Upload ID. + if first { + ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket))) + w.stream, err = w.c.raw.WriteObject(ctx) + if err != nil { + return nil, 0, false, err + } + + if w.upid != "" { + req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} + } else { + spec, err := w.writeObjectSpec() + if err != nil { + return nil, 0, false, err + } + req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ + WriteObjectSpec: spec, + } + } + + // TODO: Currently the checksums are only sent on the first message + // of the stream, but in the future, we must also support sending it + // on the *last* message of the stream (instead of the first). 
+ if w.sendCRC32C { + req.ObjectChecksums = &storagepb.ObjectChecksums{ + Crc32C: proto.Uint32(w.attrs.CRC32C), + Md5Hash: w.attrs.MD5, + } + } + } + + err = w.stream.Send(req) + if err == io.EOF { + // err was io.EOF. The client-side of a stream only gets an EOF on Send + // when the backend closes the stream and wants to return an error + // status. Closing the stream receives the status as an error. + _, err = w.stream.CloseAndRecv() + + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if ShouldRetry(err) { + sent = 0 + finishWrite = false + // TODO: Add test case for failure modes of querying progress. + offset, err = w.determineOffset(start) + if err == nil { + continue + } + } + } + if err != nil { + return nil, 0, false, err + } + + // Update the immediate stream's sent total and the upload offset with + // the data sent. + sent += len(data) + offset += int64(len(data)) + + // Not done sending data, do not attempt to commit it yet, loop around + // and send more data. + if recvd-sent > 0 { + continue + } + + // Done sending data. Close the stream to "commit" the data sent. + resp, finalized, err := w.commit() + // Retriable errors mean we should start over and attempt to + // resend the entire buffer via a new stream. + // If not retriable, falling through will return the error received + // from closing the stream. + if ShouldRetry(err) { + sent = 0 + finishWrite = false + offset, err = w.determineOffset(start) + if err == nil { + continue + } + } + if err != nil { + return nil, 0, false, err + } + + return resp.GetResource(), offset, finalized, nil + } +} + +// determineOffset either returns the offset given to it in the case of a simple +// upload, or queries the write status in the case a resumable upload is being +// used. +func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { + // For a Resumable Upload, we must start from however much data + // was committed. + if w.upid != "" { + committed, err := w.queryProgress() + if err != nil { + return 0, err + } + offset = committed + } + return offset, nil +} + +// commit closes the stream to commit the data sent and potentially receive +// the finalized object if finished uploading. If the last request sent +// indicated that writing was finished, the Object will be finalized and +// returned. If not, then the Object will be nil, and the boolean returned will +// be false. +func (w *gRPCWriter) commit() (*storagepb.WriteObjectResponse, bool, error) { + finalized := true + resp, err := w.stream.CloseAndRecv() + if err == io.EOF { + // Closing a stream for a resumable upload finish_write = false results + // in an EOF which can be ignored, as we aren't done uploading yet. + finalized = false + err = nil + } + // Drop the stream reference as it has been closed. + w.stream = nil + + return resp, finalized, err +} + +// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's +// ObjectAttrs and applies its Conditions. This is only used for gRPC. +func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { + // To avoid modifying the ObjectAttrs embeded in the calling writer, deref + // the ObjectAttrs pointer to make a copy, then assign the desired name to + // the attribute. 
+ attrs := *w.attrs + + spec := &storagepb.WriteObjectSpec{ + Resource: attrs.toProtoObject(w.bucket), + } + // WriteObject doesn't support the generation condition, so use default. + if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil { + return nil, err + } + return spec, nil +} + +// read copies the data in the reader to the given buffer and reports how much +// data was read into the buffer and if there is no more data to read (EOF). +func (w *gRPCWriter) read() (int, bool, error) { + // Set n to -1 to start the Read loop. + var n, recvd int = -1, 0 + var err error + for err == nil && n != 0 { + // The routine blocks here until data is received. + n, err = w.reader.Read(w.buf[recvd:]) + recvd += n + } + var done bool + if err == io.EOF { + done = true + err = nil + } + return recvd, done, err +} + +func checkCanceled(err error) error { + if status.Code(err) == codes.Canceled { + return context.Canceled + } + + return err +} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 2de721e8154..d21fba141c0 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,6 +20,7 @@ import ( "fmt" "time" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) @@ -90,7 +91,7 @@ type HMACKeyHandle struct { projectID string accessID string retry *retryConfig - raw *raw.ProjectsHmacKeysService + tc storageClient } // HMACKeyHandle creates a handle that will be used for HMACKey operations. @@ -101,7 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { projectID: projectID, accessID: accessID, retry: c.retry, - raw: raw.NewProjectsHmacKeysService(c.raw), + tc: c.tc, } } @@ -113,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { - call := hkh.raw.Get(hkh.projectID, hkh.accessID) - desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - setClientHeader(call.Header()) + o := makeStorageOpts(true, hkh.retry, desc.userProjectID) + hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...) - var metadata *raw.HmacKeyMetadata - var err error - err = run(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }, hkh.retry, true, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - - hkPb := &raw.HmacKey{ - Metadata: metadata, - } - return pbHmacKeyToHMACKey(hkPb, false) + return hk, err } // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. @@ -147,49 +131,59 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // // This method is EXPERIMENTAL and subject to change or removal without notice. 
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { - delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID) desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - delCall = delCall.UserProject(desc.userProjectID) - } - setClientHeader(delCall.Header()) - return run(ctx, func() error { - return delCall.Context(ctx).Do() - }, hkh.retry, true, setRetryHeaderHTTP(delCall)) + o := makeStorageOpts(true, hkh.retry, desc.userProjectID) + return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...) } -func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { - pbmd := pb.Metadata - if pbmd == nil { +func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { + hkmd := hk.Metadata + if hkmd == nil { return nil, errors.New("field Metadata cannot be nil") } - createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated) + createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) if err != nil { return nil, fmt.Errorf("field CreatedTime: %v", err) } - updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated) + updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) if err != nil && !updatedTimeCanBeNil { return nil, fmt.Errorf("field UpdatedTime: %v", err) } - hmk := &HMACKey{ - AccessID: pbmd.AccessId, - Secret: pb.Secret, - Etag: pbmd.Etag, - ID: pbmd.Id, - State: HMACState(pbmd.State), - ProjectID: pbmd.ProjectId, + hmKey := &HMACKey{ + AccessID: hkmd.AccessId, + Secret: hk.Secret, + Etag: hkmd.Etag, + ID: hkmd.Id, + State: HMACState(hkmd.State), + ProjectID: hkmd.ProjectId, CreatedTime: createdTime, UpdatedTime: updatedTime, - ServiceAccountEmail: pbmd.ServiceAccountEmail, + ServiceAccountEmail: hkmd.ServiceAccountEmail, + } + + return hmKey, nil +} + +func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { + if pbmd == nil { + return nil } - return hmk, nil + return &HMACKey{ + AccessID: pbmd.GetAccessId(), + ID: pbmd.GetId(), + State: HMACState(pbmd.GetState()), + ProjectID: pbmd.GetProject(), + CreatedTime: convertProtoTime(pbmd.GetCreateTime()), + UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), + ServiceAccountEmail: pbmd.GetServiceAccountEmail(), + } } // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. @@ -203,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma return nil, errors.New("storage: expecting a non-blank service account email") } - svc := raw.NewProjectsHmacKeysService(c.raw) - call := svc.Create(projectID, serviceAccountEmail) desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - - setClientHeader(call.Header()) - - var hkPb *raw.HmacKey - - if err := run(ctx, func() error { - h, err := call.Context(ctx).Do() - hkPb = h - return err - }, c.retry, false, setRetryHeaderHTTP(call)); err != nil { - return nil, err - } - return pbHmacKeyToHMACKey(hkPb, true) + o := makeStorageOpts(false, c.retry, desc.userProjectID) + hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...) + return hk, err } // HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. 
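For orientation, a rough usage sketch of the exported HMAC key surface that the hmac.go hunks above and below now route through the transport-agnostic storageClient (CreateHMACKey, HMACKeyHandle.Get/Update/Delete, ListHMACKeys). The project ID and service-account email are placeholders, the usual "cloud.google.com/go/storage" and "google.golang.org/api/iterator" imports are assumed, and these methods are marked EXPERIMENTAL in the package:

    ctx := context.Background()
    client, err := storage.NewClient(ctx)
    if err != nil {
        // Handle err.
    }

    // Create a key for a service account, then read it back through its handle.
    key, err := client.CreateHMACKey(ctx, "my-project", "sa@my-project.iam.gserviceaccount.com")
    if err != nil {
        // Handle err.
    }
    hkh := client.HMACKeyHandle("my-project", key.AccessID)
    if _, err := hkh.Get(ctx); err != nil {
        // Handle err.
    }

    // HMAC keys must be deactivated before they can be deleted.
    if _, err := hkh.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
        // Handle err.
    }
    if err := hkh.Delete(ctx); err != nil {
        // Handle err.
    }

    // Iterate over the remaining keys in the project.
    it := client.ListHMACKeys(ctx, "my-project")
    for {
        key, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // Handle err.
        }
        _ = key // Use key.
    }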
@@ -247,35 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) } - call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{ - Etag: au.Etag, - State: string(au.State), - }) - desc := new(hmacKeyDesc) for _, opt := range opts { opt.withHMACKeyDesc(desc) } - if desc.userProjectID != "" { - call = call.UserProject(desc.userProjectID) - } - setClientHeader(call.Header()) - var metadata *raw.HmacKeyMetadata - var err error isIdempotent := len(au.Etag) > 0 - err = run(ctx, func() error { - metadata, err = call.Context(ctx).Do() - return err - }, h.retry, isIdempotent, setRetryHeaderHTTP(call)) - - if err != nil { - return nil, err - } - hkPb := &raw.HmacKey{ - Metadata: metadata, - } - return pbHmacKeyToHMACKey(hkPb, false) + o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID) + hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...) + return hk, err } // An HMACKeysIterator is an iterator over HMACKeys. @@ -301,27 +260,13 @@ type HMACKeysIterator struct { // // This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { - it := &HMACKeysIterator{ - ctx: ctx, - raw: raw.NewProjectsHmacKeysService(c.raw), - projectID: projectID, - retry: c.retry, - } - + desc := new(hmacKeyDesc) for _, opt := range opts { - opt.withHMACKeyDesc(&it.desc) + opt.withHMACKeyDesc(desc) } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - it.fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it + o := makeStorageOpts(true, c.retry, desc.userProjectID) + return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...) } // Next returns the next result. Its second return value is iterator.Done if @@ -350,6 +295,8 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { + // TODO: Remove fetch method upon integration. This method is internalized into + // httpStorageClient.ListHMACKeys() as it is the only caller. 
call := it.raw.List(it.projectID) setClientHeader(call.Header()) if pageToken != "" { @@ -379,10 +326,10 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, } for _, metadata := range resp.Items { - hkPb := &raw.HmacKey{ + hk := &raw.HmacKey{ Metadata: metadata, } - hkey, err := pbHmacKeyToHMACKey(hkPb, true) + hkey, err := toHMACKeyFromRaw(hk, true) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 1ec55150f50..a589d3d0fba 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -16,14 +16,21 @@ package storage import ( "context" + "encoding/base64" "errors" "fmt" + "io" + "io/ioutil" "net/http" "net/url" "os" "reflect" + "strconv" "strings" + "time" + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -152,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin return res.EmailAddress, nil } -func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { +func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) var bkt *raw.Bucket if attrs != nil { @@ -160,7 +167,7 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, at } else { bkt = &raw.Bucket{} } - + bkt.Name = bucket // If there is lifecycle information but no location, explicitly set // the location. This is a GCS quirk/bug. if bkt.Location == "" && bkt.Lifecycle != nil { @@ -378,14 +385,143 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q // Object metadata methods. -func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error { - return errMethodNotSupported +func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + req := c.raw.Objects.Delete(bucket, object).Context(ctx) + if err := applyConds("Delete", gen, conds, req); err != nil { + return err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + return err } -func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - return nil, errMethodNotSupported + +func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) 
+ req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) + if err := applyConds("Attrs", gen, conds, req); err != nil { + return nil, err + } + if s.userProject != "" { + req.UserProject(s.userProject) + } + if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = run(ctx, func() error { + obj, err = req.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + var e *googleapi.Error + if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil } -func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - return nil, errMethodNotSupported + +func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { + s := callSettings(c.settings, opts...) + + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if !uattrs.CustomTime.IsZero() { + attrs.CustomTime = uattrs.CustomTime + forceSendFields = append(forceSendFields, "CustomTime") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. 
+ forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", gen, conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + var e *googleapi.Error + if errors.As(err, &e) && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil } // Default Object ACL methods. @@ -412,8 +548,25 @@ func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st } return toObjectACLRules(acls.Items), nil } -func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) { - return nil, errMethodNotSupported +func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + var err error + req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) } // Bucket ACL methods. @@ -441,7 +594,7 @@ func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, o return toBucketACLRules(acls.Items), nil } -func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) { +func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { s := callSettings(c.settings, opts...) acl := &raw.BucketAccessControl{ Bucket: bucket, @@ -450,17 +603,11 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, } req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) - var aclRule ACLRule var err error - err = run(ctx, func() error { - acl, err = req.Do() - aclRule = toBucketACLRule(acl) + return run(ctx, func() error { + _, err = req.Do() return err }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) - if err != nil { - return nil, err - } - return &aclRule, nil } // configureACLCall sets the context, user project and headers on the apiary library call. @@ -477,29 +624,440 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H // Object ACL methods. func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { - return errMethodNotSupported + s := callSettings(c.settings, opts...) 
+ req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) } + +// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. +// Selecting a specific generation of this object is not currently supported by the client. func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - return nil, errMethodNotSupported + s := callSettings(c.settings, opts...) + var acls *raw.ObjectAccessControls + var err error + req := c.raw.ObjectAccessControls.List(bucket, object) + configureACLCall(ctx, s.userProject, req) + err = run(ctx, func() error { + acls, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil } -func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) { - return nil, errMethodNotSupported + +func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + var err error + req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + configureACLCall(ctx, s.userProject, req) + return run(ctx, func() error { + _, err = req.Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) } // Media operations. func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { - return nil, errMethodNotSupported + s := callSettings(c.settings, opts...) + rawReq := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. 
+ rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket) + if req.sendCRC32C { + rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C) + } + for _, src := range req.srcs { + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.name, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) + } + + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) + if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + + var err error + retryCall := func() error { obj, err = call.Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + return newObject(obj), nil } func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { - return nil, errMethodNotSupported + s := callSettings(c.settings, opts...) + rawObject := req.dstObject.attrs.toRawObject("") + call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) + + call.Context(ctx).Projection("full") + if req.token != "" { + call.RewriteToken(req.token) + } + if req.dstObject.keyName != "" { + call.DestinationKmsKeyName(req.dstObject.keyName) + } + if req.predefinedACL != "" { + call.DestinationPredefinedAcl(req.predefinedACL) + } + if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { + return nil, err + } + if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { + return nil, err + } + if s.userProject != "" { + call.UserProject(s.userProject) + } + // Set destination encryption headers. + if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { + return nil, err + } + // Set source encryption headers. + if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + + retryCall := func() error { res, err = call.Do(); return err } + + if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + + r := &rewriteObjectResponse{ + done: res.Done, + written: res.TotalBytesRewritten, + size: res.ObjectSize, + token: res.RewriteToken, + resource: newObject(res.Resource), + } + + return r, nil } -func (c *httpStorageClient) OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error { - return errMethodNotSupported +func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) 
+ + u := &url.URL{ + Scheme: c.scheme, + Host: c.readHost, + Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + } + verb := "GET" + if params.length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if s.userProject != "" { + req.Header.Set("X-Goog-User-Project", s.userProject) + } + if params.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil { + return nil, err + } + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + // If the context has already expired, return immediately without making a + // call. + if err := ctx.Err(); err != nil { + return nil, err + } + start := params.offset + seen + if params.length < 0 && start < 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if params.length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if params.length > 0 { + // The end character isn't affected by how many bytes we've seen. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. + if err := setConditionsHeaders(req.Header, params.conds); err != nil { + return nil, err + } + // If an object generation is specified, include generation as query string parameters. + if params.gen >= 0 { + req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) + } + + var res *http.Response + err = run(ctx, func() error { + res, err = c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && params.length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + params.gen = gen64 + } + return nil + }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) + if err != nil { + return nil, err + } + return res, nil + } + + res, err := reopen(0) + if err != nil { + return nil, err + } + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + startOffset int64 // non-zero if range request. 
+ ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + // Content range is formatted -/. We take + // the total size. + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + + dashIndex := strings.Index(cr, "-") + if dashIndex >= 0 { + startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err) + } + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. + if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + if params.length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Metageneration") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + StartOffset: startOffset, + Generation: params.gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reader: &httpReader{ + reopen: reopen, + body: body, + }, + }, nil } -func (c *httpStorageClient) OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error { - return errMethodNotSupported + +func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { + s := callSettings(c.settings, opts...) + errorf := params.setError + setObj := params.setObj + progress := params.progress + attrs := params.attrs + + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(params.chunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + if params.chunkRetryDeadline != 0 { + mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline)) + } + + pr, pw := io.Pipe() + + go func() { + defer close(params.donec) + + rawObj := attrs.toRawObject(params.bucket) + if params.sendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if attrs.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5) + } + call := c.raw.Objects.Insert(params.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(params.ctx). 
+ Name(params.attrs.Name) + call.ProgressUpdater(func(n, _ int64) { progress(n) }) + + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } + if attrs.PredefinedACL != "" { + call.PredefinedAcl(attrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", defaultGen, params.conds, call) + if err == nil { + if s.userProject != "" { + call.UserProject(s.userProject) + } + // TODO(tritone): Remove this code when Uploads begin to support + // retry attempt header injection with "client header" injection. + setClientHeader(call.Header()) + + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. + + // Retry only when the operation is idempotent or the retry policy is RetryAlways. + isIdempotent := params.conds != nil && (params.conds.GenerationMatch >= 0 || params.conds.DoesNotExist == true) + var useRetry bool + if (s.retry == nil || s.retry.policy == RetryIdempotent) && isIdempotent { + useRetry = true + } else if s.retry != nil && s.retry.policy == RetryAlways { + useRetry = true + } + if useRetry { + if s.retry != nil { + call.WithRetry(s.retry.backoff, s.retry.shouldRetry) + } else { + call.WithRetry(nil, nil) + } + } + resp, err = call.Do() + } + if err != nil { + errorf(err) + pr.CloseWithError(err) + return + } + setObj(newObject(resp)) + }() + + return pw, nil } // IAM methods. @@ -560,18 +1118,230 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str // HMAC Key methods. -func (c *httpStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) { - return nil, errMethodNotSupported +func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Get(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) } -func (c *httpStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator { - return &HMACKeysIterator{} + +func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { + s := callSettings(c.settings, opts...) 
+ it := &HMACKeysIterator{ + ctx: ctx, + raw: c.raw.Projects.HmacKeys, + projectID: project, + retry: s.retry, + } + fetch := func(pageSize int, pageToken string) (token string, err error) { + call := c.raw.Projects.HmacKeys.List(project) + setClientHeader(call.Header()) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if pageSize > 0 { + call = call.MaxResults(int64(pageSize)) + } + if showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + if serviceAccountEmail != "" { + call = call.ServiceAccountEmail(serviceAccountEmail) + } + + var resp *raw.HmacKeysMetadata + err = run(it.ctx, func() error { + resp, err = call.Context(it.ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return "", err + } + + for _, metadata := range resp.Items { + hk := &raw.HmacKey{ + Metadata: metadata, + } + hkey, err := toHMACKeyFromRaw(hk, true) + if err != nil { + return "", err + } + it.hmacKeys = append(it.hmacKeys, hkey) + } + return resp.NextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{ + Etag: attrs.Etag, + State: string(attrs.State), + }) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var metadata *raw.HmacKeyMetadata + var err error + if err := run(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + hk := &raw.HmacKey{ + Metadata: metadata, + } + return toHMACKeyFromRaw(hk, false) +} + +func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + + var hk *raw.HmacKey + if err := run(ctx, func() error { + h, err := call.Context(ctx).Do() + hk = h + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + return nil, err + } + return toHMACKeyFromRaw(hk, true) +} + +func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { + s := callSettings(c.settings, opts...) + call := c.raw.Projects.HmacKeys.Delete(project, accessID) + if s.userProject != "" { + call = call.UserProject(s.userProject) + } + return run(ctx, func() error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +// Notification methods. + +// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID. +// +// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket, +// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets. 
+func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.List(bucket) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var res *raw.Notifications + err = run(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }, s.retry, true, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil } -func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - return nil, errMethodNotSupported + +func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Insert(bucket, toRawNotification(n)) + if s.userProject != "" { + call.UserProject(s.userProject) + } + var rn *raw.Notification + err = run(ctx, func() error { + rn, err = call.Context(ctx).Do() + return err + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + if err != nil { + return nil, err + } + return toNotification(rn), nil } -func (c *httpStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) { - return nil, errMethodNotSupported + +func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + s := callSettings(c.settings, opts...) + call := c.raw.Notifications.Delete(bucket, id) + if s.userProject != "" { + call.UserProject(s.userProject) + } + return run(ctx, func() error { + return call.Context(ctx).Do() + }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) +} + +type httpReader struct { + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) +} + +func (r *httpReader) Read(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if err == nil || err == io.EOF { + return n, err + } + // Read failed (likely due to connection issues), but we will try to reopen + // the pipe and continue. Send a ranged read request that takes into account + // the number of bytes we've already seen. + res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil } -func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error { - return errMethodNotSupported + +func (r *httpReader) Close() error { + return r.body.Close() } diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index cf9f899a487..408661718fc 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -27,17 +27,17 @@ import ( // IAM provides access to IAM access control for the bucket. 
func (b *BucketHandle) IAM() *iam.Handle { return iam.InternalNewHandleClient(&iamClient{ - raw: b.c.raw, userProject: b.userProject, retry: b.retry, + client: b.c, }, b.name) } // iamClient implements the iam.client interface. type iamClient struct { - raw *raw.Service userProject string retry *retryConfig + client *Client } func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { @@ -48,57 +48,25 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion)) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var rp *raw.Policy - err = run(ctx, func() error { - rp, err = call.Context(ctx).Do() - return err - }, c.retry, true, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return iamFromStoragePolicy(rp), nil + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) } func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") defer func() { trace.EndSpan(ctx, err) }() - rp := iamToStoragePolicy(p) - call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } isIdempotent := len(p.Etag) > 0 - return run(ctx, func() error { - _, err := call.Context(ctx).Do() - return err - }, c.retry, isIdempotent, setRetryHeaderHTTP(call)) + o := makeStorageOpts(isIdempotent, c.retry, c.userProject) + return c.client.tc.SetIamPolicy(ctx, resource, p, o...) } func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.TestIamPermissions(resource, perms) - setClientHeader(call.Header()) - if c.userProject != "" { - call.UserProject(c.userProject) - } - var res *raw.TestIamPermissionsResponse - err = run(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }, c.retry, true, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return res.Permissions, nil + o := makeStorageOpts(true, c.retry, c.userProject) + return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) } func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index d89ce0a2613..56e97548126 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -19,43 +19,54 @@ // // Lets you store and retrieve potentially-large, immutable data objects. // -// NOTE: This package is in alpha. It is not stable, and is likely to change. +// NOTE: This package is in alpha. It is not stable, and is likely to change. // -// Example usage +// # Example usage // // To get started with this package, create a client. -// ctx := context.Background() -// c, err := storage.NewClient(ctx) -// if err != nil { -// // TODO: Handle error. 
-// } -// defer c.Close() +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() // // The client will use your default application credentials. Clients should be reused instead of created as needed. // The methods of Client are safe for concurrent use by multiple goroutines. // The returned client must be Closed when it is done being used. // -// Using the Client +// # Using the Client // // The following is an example of making an API call with the newly created client. // -// ctx := context.Background() -// c, err := storage.NewClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// -// req := &storagepb.DeleteBucketRequest{ -// // TODO: Fill request struct fields. -// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/storage/v2#DeleteBucketRequest. -// } -// err = c.DeleteBucket(ctx, req) -// if err != nil { -// // TODO: Handle error. -// } -// -// Use of Context +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := storage.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &storagepb.DeleteBucketRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest. +// } +// err = c.DeleteBucket(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// +// # Use of Context // // The ctx passed to NewClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. 
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 355215ed415..01103fa93bd 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -10,6 +10,11 @@ "grpc": { "libraryClient": "Client", "rpcs": { + "CancelResumableWrite": { + "methods": [ + "CancelResumableWrite" + ] + }, "ComposeObject": { "methods": [ "ComposeObject" diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go similarity index 65% rename from vendor/github.com/googleapis/go-type-adapters/adapters/doc.go rename to vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go index b15568962be..6ff86c4fb49 100644 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,6 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package adapters provides helper functions for the google.type protobuf -// messages (Decimal, Fraction, etc.). -package adapters +package storage + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +// InsertMetadata inserts the given gRPC metadata into the outgoing context. +func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + return insertMetadata(ctx, mds...) +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 803f9d26025..cf7e0e785d6 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -18,15 +18,19 @@ package storage import ( "context" + "fmt" "math" + "net/url" + "regexp" + "strings" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" iampb "google.golang.org/genproto/googleapis/iam/v1" - storagepb "google.golang.org/genproto/googleapis/storage/v2" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/proto" @@ -51,6 +55,7 @@ type CallOptions struct { ListNotifications []gax.CallOption ComposeObject []gax.CallOption DeleteObject []gax.CallOption + CancelResumableWrite []gax.CallOption GetObject []gax.CallOption ReadObject []gax.CallOption UpdateObject []gax.CallOption @@ -96,6 +101,7 @@ func defaultCallOptions() *CallOptions { ListNotifications: []gax.CallOption{}, ComposeObject: []gax.CallOption{}, DeleteObject: []gax.CallOption{}, + CancelResumableWrite: []gax.CallOption{}, GetObject: []gax.CallOption{}, ReadObject: []gax.CallOption{}, UpdateObject: []gax.CallOption{}, @@ -113,7 +119,7 @@ func defaultCallOptions() *CallOptions { } } -// internalClient is an interface that defines the methods availaible from Cloud Storage API. +// internalClient is an interface that defines the methods available from Cloud Storage API. 
type internalClient interface { Close() error setGoogleClientInfo(...string) @@ -133,6 +139,7 @@ type internalClient interface { ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) @@ -158,22 +165,22 @@ type internalClient interface { // // Resources are named as follows: // -// Projects are referred to as they are defined by the Resource Manager API, -// using strings like projects/123456 or projects/my-string-id. +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. // -// Buckets are named using string names of the form: -// projects/{project}/buckets/{bucket} -// For globally unique buckets, _ may be substituted for the project. +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. // -// Objects are uniquely identified by their name along with the name of the -// bucket they belong to, as separate strings in this API. For example: +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: // -// ReadObjectRequest { -// bucket: ‘projects/_/buckets/my-bucket’ -// object: ‘my-object’ -// } -// Note that object names can contain / characters, which are treated as -// any other character (no special directory semantics). +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). type Client struct { // The internal transport-dependent client. internalClient internalClient @@ -199,7 +206,8 @@ func (c *Client) setGoogleClientInfo(keyval ...string) { // Connection returns a connection to the API service. // -// Deprecated. +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. func (c *Client) Connection() *grpc.ClientConn { return c.internalClient.Connection() } @@ -229,17 +237,17 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) } -// GetIamPolicy gets the IAM policy for a specified bucket. +// GetIamPolicy gets the IAM policy for a specified bucket or object. func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } -// SetIamPolicy updates an IAM policy for the specified bucket. +// SetIamPolicy updates an IAM policy for the specified bucket or object. 
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } -// TestIamPermissions tests a set of permissions on the given bucket to see which, if +// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) @@ -280,12 +288,16 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject } // DeleteObject deletes an object and its metadata. Deletions are permanent if versioning -// is not enabled for the bucket, or if the generation parameter -// is used. +// is not enabled for the bucket, or if the generation parameter is used. func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { return c.internalClient.DeleteObject(ctx, req, opts...) } +// CancelResumableWrite cancels an in-progress resumable upload. +func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + return c.internalClient.CancelResumableWrite(ctx, req, opts...) +} + // GetObject retrieves an object’s metadata. func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { return c.internalClient.GetObject(ctx, req, opts...) @@ -312,13 +324,40 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // true, or else it is an error. // // For a resumable write, the client should instead call -// StartResumableWrite() and provide that method an WriteObjectSpec. +// StartResumableWrite(), populating a WriteObjectSpec into that request. // They should then attach the returned upload_id to the first message of -// each following call to Create. If there is an error or the connection is -// broken during the resumable Create(), the client should check the status -// of the Create() by calling QueryWriteStatus() and continue writing from -// the returned persisted_size. This may be less than the amount of data the -// client previously sent. +// each following call to WriteObject. If the stream is closed before +// finishing the upload (either explicitly by the client or due to a network +// error or an error response from the server), the client should do as +// follows: +// +// Check the result Status of the stream, to determine if writing can be +// resumed on this stream or must be restarted from scratch (by calling +// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED, +// INTERNAL, and UNAVAILABLE. For each case, the client should use binary +// exponential backoff before retrying. Additionally, writes can be +// resumed after RESOURCE_EXHAUSTED errors, but only after taking +// appropriate measures, which may include reducing aggregate send rate +// across clients and/or requesting a quota increase for your project. 
+// +// If the call to WriteObject returns ABORTED, that indicates +// concurrent attempts to update the resumable write, caused either by +// multiple racing clients or by a single client where the previous +// request was timed out on the client side but nonetheless reached the +// server. In this case the client should take steps to prevent further +// concurrent writes (e.g., increase the timeouts, stop using more than +// one process to perform the upload, etc.), and then should follow the +// steps below for resuming the upload. +// +// For resumable errors, the client should call QueryWriteStatus() and +// then continue writing from the returned persisted_size. This may be +// less than the amount of data the client previously sent. Note also that +// it is acceptable to send data starting at an offset earlier than the +// returned persisted_size; in this case, the service will skip data at +// offsets that were already persisted (without checking that it matches +// the previously written data), and write only the data starting from the +// persisted offset. This behavior can make client-side handling simpler +// in some cases. // // The service will not view the object as complete until the client has // sent a WriteObjectRequest with finish_write set to true. Sending any @@ -326,6 +365,10 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // true will cause an error. The client should check the response it // receives to determine how much data the service was able to commit and // whether the service views the object as complete. +// +// Attempting to resume an already finalized object will result in an OK +// status, with a WriteObjectResponse containing the finalized object’s +// metadata. func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { return c.internalClient.WriteObject(ctx, opts...) } @@ -424,22 +467,22 @@ type gRPCClient struct { // // Resources are named as follows: // -// Projects are referred to as they are defined by the Resource Manager API, -// using strings like projects/123456 or projects/my-string-id. +// Projects are referred to as they are defined by the Resource Manager API, +// using strings like projects/123456 or projects/my-string-id. // -// Buckets are named using string names of the form: -// projects/{project}/buckets/{bucket} -// For globally unique buckets, _ may be substituted for the project. +// Buckets are named using string names of the form: +// projects/{project}/buckets/{bucket} +// For globally unique buckets, _ may be substituted for the project. // -// Objects are uniquely identified by their name along with the name of the -// bucket they belong to, as separate strings in this API. For example: +// Objects are uniquely identified by their name along with the name of the +// bucket they belong to, as separate strings in this API. For example: // -// ReadObjectRequest { -// bucket: ‘projects/_/buckets/my-bucket’ -// object: ‘my-object’ -// } -// Note that object names can contain / characters, which are treated as -// any other character (no special directory semantics). +// ReadObjectRequest { +// bucket: ‘projects/_/buckets/my-bucket’ +// object: ‘my-object’ +// } +// Note that object names can contain / characters, which are treated as +// any other character (no special directory semantics). 
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { clientOpts := defaultGRPCClientOptions() if newClientHook != nil { @@ -476,7 +519,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // Connection returns a connection to the API service. // -// Deprecated. +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. func (c *gRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } @@ -497,7 +541,18 @@ func (c *gRPCClient) Close() error { } func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -508,7 +563,18 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck } func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) 
var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -581,7 +647,18 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets } func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -596,7 +673,21 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage } func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) 
var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -611,7 +702,21 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe } func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -626,7 +731,21 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe } func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) 
var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -641,7 +760,18 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP } func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -656,7 +786,18 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck } func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -667,7 +808,18 @@ func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.Dele } func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...) var resp *storagepb.Notification err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -682,7 +834,18 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti } func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...) 
var resp *storagepb.Notification err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -697,7 +860,18 @@ func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.Crea } func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...) it := &NotificationIterator{} req = proto.Clone(req).(*storagepb.ListNotificationsRequest) @@ -740,7 +914,18 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN } func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -755,7 +940,18 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb } func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -765,8 +961,45 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje return err } +func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) + var resp *storagepb.CancelResumableWriteResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...)
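CancelResumableWrite is new in this update, both as a gRPC client wrapper (above) and as request/response messages in the regenerated stubs (further down). A hedged usage sketch against the generated StorageClient stub follows; the endpoint, credentials, and upload ID are placeholders, and the stubs path is an internal package of the storage module, so treat this purely as an illustration of the message shapes.

package main

import (
	"context"
	"log"

	storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assume a gRPC connection to a Storage endpoint (for example, a local emulator).
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := storagepb.NewStorageClient(conn)

	// upload_id comes from a prior StartResumableWriteResponse.
	req := &storagepb.CancelResumableWriteRequest{UploadId: "example-upload-id"}
	if _, err := client.CancelResumableWrite(context.Background(), req); err != nil {
		log.Fatalf("CancelResumableWrite: %v", err)
	}
	log.Println("resumable write cancelled")
}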
var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -781,7 +1014,18 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ } func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) var resp storagepb.Storage_ReadObjectClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -795,7 +1039,18 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe } func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...) var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -825,7 +1080,18 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s } func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...)
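Several of the request messages used above (ListObjects, GetObject, ReadObject) carry an optional read_mask which, after this change, is typed against genproto's field_mask alias rather than fieldmaskpb. A small, hedged construction sketch with a hypothetical bucket name; "*" is the documented way to request all fields.

package main

import (
	"fmt"

	storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
	field_mask "google.golang.org/genproto/protobuf/field_mask"
)

func main() {
	// Hypothetical parent bucket; "*" asks for every field of each listed object.
	req := &storagepb.ListObjectsRequest{
		Parent:   "projects/_/buckets/my-bucket",
		ReadMask: &field_mask.FieldMask{Paths: []string{"*"}},
	}
	fmt.Println(req.GetParent(), req.GetReadMask().GetPaths())
}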
it := &ObjectIterator{} req = proto.Clone(req).(*storagepb.ListObjectsRequest) @@ -868,7 +1134,21 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects } func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 { + routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1]) + } + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...) var resp *storagepb.RewriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -883,7 +1163,18 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb } func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
var resp *storagepb.StartResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -898,7 +1189,18 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta } func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + md := metadata.Pairs("x-goog-request-params", routingHeaders) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...) var resp *storagepb.QueryWriteStatusResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { diff --git a/vendor/google.golang.org/genproto/googleapis/storage/v2/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go similarity index 72% rename from vendor/google.golang.org/genproto/googleapis/storage/v2/storage.pb.go rename to vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go index f8e6a785fde..28c00714b24 100644 --- a/vendor/google.golang.org/genproto/googleapis/storage/v2/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go @@ -25,17 +25,17 @@ import ( reflect "reflect" sync "sync" + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" date "google.golang.org/genproto/googleapis/type/date" + field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -176,7 +176,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} } // Request message for DeleteBucket. @@ -264,7 +264,7 @@ type GetBucketRequest struct { // Mask specifying which fields to read. // A "*" field may be used to indicate all fields. // If no mask is specified, will default to all fields.
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *GetBucketRequest) Reset() { @@ -320,7 +320,7 @@ func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { return 0 } -func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { +func (x *GetBucketRequest) GetReadMask() *field_mask.FieldMask { if x != nil { return x.ReadMask } @@ -443,7 +443,7 @@ type ListBucketsRequest struct { // If no mask is specified, will default to all fields except items.owner, // items.acl, and items.default_object_acl. // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *ListBucketsRequest) Reset() { @@ -506,7 +506,7 @@ func (x *ListBucketsRequest) GetPrefix() string { return "" } -func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { +func (x *ListBucketsRequest) GetReadMask() *field_mask.FieldMask { if x != nil { return x.ReadMask } @@ -664,7 +664,7 @@ type UpdateBucketRequest struct { // Not specifying any fields is an error. // Not specifying a field while setting that field to a non-default value is // an error. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *field_mask.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateBucketRequest) Reset() { @@ -734,7 +734,7 @@ func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { return "" } -func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { +func (x *UpdateBucketRequest) GetUpdateMask() *field_mask.FieldMask { if x != nil { return x.UpdateMask } @@ -1144,7 +1144,7 @@ func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectReque } // Message for deleting an object. -// Either `bucket` and `object` *or* `upload_id` **must** be set (but not both). +// `bucket` and `object` **must** be set. type DeleteObjectRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1154,10 +1154,6 @@ type DeleteObjectRequest struct { Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` // Required. The name of the object to delete (when not using a resumable write). Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // The resumable upload_id of the object to delete (when deleting an - // in-progress resumable write). This should be copied from the `upload_id` - // field of `StartResumableWriteResponse`. - UploadId string `protobuf:"bytes,3,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` // If present, permanently deletes a specific revision of this object (as // opposed to the latest version, the default). 
Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"` @@ -1226,13 +1222,6 @@ func (x *DeleteObjectRequest) GetObject() string { return "" } -func (x *DeleteObjectRequest) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - func (x *DeleteObjectRequest) GetGeneration() int64 { if x != nil { return x.Generation @@ -1275,6 +1264,97 @@ func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectReques return nil } +// Message for canceling an in-progress resumable upload. +// `upload_id` **must** be set. +type CancelResumableWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The upload_id of the resumable upload to cancel. This should be copied + // from the `upload_id` field of `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *CancelResumableWriteRequest) Reset() { + *x = CancelResumableWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteRequest) ProtoMessage() {} + +func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *CancelResumableWriteRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// Empty response message for canceling an in-progress resumable upload, will be +// extended as needed. +type CancelResumableWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CancelResumableWriteResponse) Reset() { + *x = CancelResumableWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelResumableWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelResumableWriteResponse) ProtoMessage() {} + +func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. +func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} +} + // Request message for ReadObject. 
type ReadObjectRequest struct { state protoimpl.MessageState @@ -1328,13 +1408,13 @@ type ReadObjectRequest struct { // If no mask is specified, will default to all fields except metadata.owner // and metadata.acl. // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *field_mask.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1347,7 +1427,7 @@ func (x *ReadObjectRequest) String() string { func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1360,7 +1440,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} } func (x *ReadObjectRequest) GetBucket() string { @@ -1433,7 +1513,7 @@ func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestP return nil } -func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { +func (x *ReadObjectRequest) GetReadMask() *field_mask.FieldMask { if x != nil { return x.ReadMask } @@ -1474,13 +1554,13 @@ type GetObjectRequest struct { // If no mask is specified, will default to all fields except metadata.acl and // metadata.owner. // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *field_mask.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1493,7 +1573,7 @@ func (x *GetObjectRequest) String() string { func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1506,7 +1586,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. 
func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} } func (x *GetObjectRequest) GetBucket() string { @@ -1565,7 +1645,7 @@ func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestPa return nil } -func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { +func (x *GetObjectRequest) GetReadMask() *field_mask.FieldMask { if x != nil { return x.ReadMask } @@ -1599,7 +1679,7 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1612,7 +1692,7 @@ func (x *ReadObjectResponse) String() string { func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1625,7 +1705,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} } func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { @@ -1683,12 +1763,22 @@ type WriteObjectSpec struct { // Makes the operation conditional on whether the object's current // metageneration does not match the given value. IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // The expected final object size being uploaded. + // If this value is set, closing the stream after writing fewer or more than + // `object_size` bytes will result in an OUT_OF_RANGE error. + // + // This situation is considered a client error, and if such an error occurs + // you must start the upload over from scratch, this time sending the correct + // number of bytes. + // + // The `object_size` value is ignored for one-shot (non-resumable) writes. + ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` } func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1701,7 +1791,7 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1714,7 +1804,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. 
func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} } func (x *WriteObjectSpec) GetResource() *Object { @@ -1759,6 +1849,13 @@ func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 { return 0 } +func (x *WriteObjectSpec) GetObjectSize() int64 { + if x != nil && x.ObjectSize != nil { + return *x.ObjectSize + } + return 0 +} + // Request message for WriteObject. type WriteObjectRequest struct { state protoimpl.MessageState @@ -1768,6 +1865,7 @@ type WriteObjectRequest struct { // The first message of each stream should set one of the following. // // Types that are assignable to FirstMessage: + // // *WriteObjectRequest_UploadId // *WriteObjectRequest_WriteObjectSpec FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -1788,6 +1886,7 @@ type WriteObjectRequest struct { // A portion of the data for the object. // // Types that are assignable to Data: + // // *WriteObjectRequest_ChecksummedData Data isWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service @@ -1809,7 +1908,7 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1822,7 +1921,7 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1835,7 +1934,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -1942,6 +2041,7 @@ type WriteObjectResponse struct { // The response will set one of the following. 
// // Types that are assignable to WriteStatus: + // // *WriteObjectResponse_PersistedSize // *WriteObjectResponse_Resource WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -1950,7 +2050,7 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1963,7 +2063,7 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1976,7 +2076,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2058,7 +2158,7 @@ type ListObjectsRequest struct { // If no mask is specified, will default to all fields except items.acl and // items.owner. // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *field_mask.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` // Filter results to objects whose names are lexicographically equal to or // after lexicographic_start. If lexicographic_end is also set, the objects // listed have names between lexicographic_start (inclusive) and @@ -2074,7 +2174,7 @@ type ListObjectsRequest struct { func (x *ListObjectsRequest) Reset() { *x = ListObjectsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2087,7 +2187,7 @@ func (x *ListObjectsRequest) String() string { func (*ListObjectsRequest) ProtoMessage() {} func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2100,7 +2200,7 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} } func (x *ListObjectsRequest) GetParent() string { @@ -2152,7 +2252,7 @@ func (x *ListObjectsRequest) GetVersions() bool { return false } -func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { +func (x *ListObjectsRequest) GetReadMask() *field_mask.FieldMask { if x != nil { return x.ReadMask } @@ -2189,7 +2289,7 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2202,7 +2302,7 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2215,7 +2315,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -2241,6 +2341,7 @@ type QueryWriteStatusResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: + // // *QueryWriteStatusResponse_PersistedSize // *QueryWriteStatusResponse_Resource WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2249,7 +2350,7 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2262,7 +2363,7 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2275,7 +2376,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -2334,15 +2435,15 @@ type RewriteObjectRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Immutable. The name of the destination object. 
Nearly any sequence of unicode - // characters is valid. See - // [Guidelines](https://cloud.google.com/storage/docs/naming-objects). + // Immutable. The name of the destination object. + // See the + // [Naming Guidelines](https://cloud.google.com/storage/docs/naming-objects). // Example: `test.txt` // The `name` field by itself does not uniquely identify a Cloud Storage // object. A Cloud Storage object is uniquely identified by the tuple of // (bucket, object, generation). DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` - // Immutable. The name of the bucket containing The name of the destination object. + // Immutable. The name of the bucket containing the destination object. DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` // The name of the Cloud KMS key that will be used to encrypt the destination // object. The Cloud KMS key must be located in same location as the object. @@ -2429,7 +2530,7 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2442,7 +2543,7 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2455,7 +2556,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -2638,7 +2739,7 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2651,7 +2752,7 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2664,7 +2765,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. 
func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -2717,7 +2818,7 @@ type StartResumableWriteRequest struct { func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2730,7 +2831,7 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2743,7 +2844,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -2774,7 +2875,7 @@ type StartResumableWriteResponse struct { func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2787,7 +2888,7 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2800,7 +2901,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -2851,7 +2952,7 @@ type UpdateObjectRequest struct { // Not specifying any fields is an error. // Not specifying a field while setting that field to a non-default value is // an error. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *field_mask.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // A set of parameters common to Storage API requests concerning an object. 
CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` } @@ -2859,7 +2960,7 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2872,7 +2973,7 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2885,7 +2986,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -2930,7 +3031,7 @@ func (x *UpdateObjectRequest) GetPredefinedAcl() string { return "" } -func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { +func (x *UpdateObjectRequest) GetUpdateMask() *field_mask.FieldMask { if x != nil { return x.UpdateMask } @@ -2950,14 +3051,15 @@ type GetServiceAccountRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Project ID. + // Required. Project ID, in the format of "projects/". + // can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` } func (x *GetServiceAccountRequest) Reset() { *x = GetServiceAccountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2970,7 +3072,7 @@ func (x *GetServiceAccountRequest) String() string { func (*GetServiceAccountRequest) ProtoMessage() {} func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2983,7 +3085,7 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (x *GetServiceAccountRequest) GetProject() string { @@ -2999,7 +3101,9 @@ type CreateHmacKeyRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The project that the HMAC-owning service account lives in. + // Required. The project that the HMAC-owning service account lives in, in the format of + // "projects/". 
+ // can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // Required. The service account to create the HMAC for. ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` @@ -3008,7 +3112,7 @@ type CreateHmacKeyRequest struct { func (x *CreateHmacKeyRequest) Reset() { *x = CreateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3021,7 +3125,7 @@ func (x *CreateHmacKeyRequest) String() string { func (*CreateHmacKeyRequest) ProtoMessage() {} func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3034,7 +3138,7 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (x *CreateHmacKeyRequest) GetProject() string { @@ -3067,7 +3171,7 @@ type CreateHmacKeyResponse struct { func (x *CreateHmacKeyResponse) Reset() { *x = CreateHmacKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3080,7 +3184,7 @@ func (x *CreateHmacKeyResponse) String() string { func (*CreateHmacKeyResponse) ProtoMessage() {} func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3093,7 +3197,7 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { @@ -3118,14 +3222,16 @@ type DeleteHmacKeyRequest struct { // Required. The identifying key for the HMAC to delete. AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project id the HMAC key lies in. + // Required. The project that owns the HMAC key, in the format of + // "projects/". + // can be the project ID or project number. 
Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } func (x *DeleteHmacKeyRequest) Reset() { *x = DeleteHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3138,7 +3244,7 @@ func (x *DeleteHmacKeyRequest) String() string { func (*DeleteHmacKeyRequest) ProtoMessage() {} func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3151,7 +3257,7 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } func (x *DeleteHmacKeyRequest) GetAccessId() string { @@ -3176,14 +3282,16 @@ type GetHmacKeyRequest struct { // Required. The identifying key for the HMAC to delete. AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project id the HMAC key lies in. + // Required. The project the HMAC key lies in, in the format of + // "projects/". + // can be the project ID or project number. Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } func (x *GetHmacKeyRequest) Reset() { *x = GetHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3196,7 +3304,7 @@ func (x *GetHmacKeyRequest) String() string { func (*GetHmacKeyRequest) ProtoMessage() {} func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3209,7 +3317,7 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (x *GetHmacKeyRequest) GetAccessId() string { @@ -3232,7 +3340,9 @@ type ListHmacKeysRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The project id to list HMAC keys for. + // Required. The project to list HMAC keys for, in the format of + // "projects/". + // can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // Optional. The maximum number of keys to return. 
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` @@ -3247,7 +3357,7 @@ type ListHmacKeysRequest struct { func (x *ListHmacKeysRequest) Reset() { *x = ListHmacKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3260,7 +3370,7 @@ func (x *ListHmacKeysRequest) String() string { func (*ListHmacKeysRequest) ProtoMessage() {} func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3273,7 +3383,7 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *ListHmacKeysRequest) GetProject() string { @@ -3327,7 +3437,7 @@ type ListHmacKeysResponse struct { func (x *ListHmacKeysResponse) Reset() { *x = ListHmacKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3340,7 +3450,7 @@ func (x *ListHmacKeysResponse) String() string { func (*ListHmacKeysResponse) ProtoMessage() {} func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3353,7 +3463,7 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { @@ -3371,6 +3481,9 @@ func (x *ListHmacKeysResponse) GetNextPageToken() string { } // Request object to update an HMAC key state. +// HmacKeyMetadata.state is required and the only writable field in +// UpdateHmacKey operation. Specifying fields other than state will result in an +// error. type UpdateHmacKeyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3382,13 +3495,15 @@ type UpdateHmacKeyRequest struct { // identify the key. HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` // Update mask for hmac_key. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Not specifying any fields will mean only the `state` field is updated to + // the value specified in `hmac_key`. 
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateHmacKeyRequest) Reset() { *x = UpdateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +3516,7 @@ func (x *UpdateHmacKeyRequest) String() string { func (*UpdateHmacKeyRequest) ProtoMessage() {} func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +3529,7 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} } func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { @@ -3424,7 +3539,7 @@ func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { return nil } -func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { +func (x *UpdateHmacKeyRequest) GetUpdateMask() *field_mask.FieldMask { if x != nil { return x.UpdateMask } @@ -3451,7 +3566,7 @@ type CommonObjectRequestParams struct { func (x *CommonObjectRequestParams) Reset() { *x = CommonObjectRequestParams{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3464,7 +3579,7 @@ func (x *CommonObjectRequestParams) String() string { func (*CommonObjectRequestParams) ProtoMessage() {} func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3477,7 +3592,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { // Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. 
func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { @@ -3511,7 +3626,7 @@ type ServiceConstants struct { func (x *ServiceConstants) Reset() { *x = ServiceConstants{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3524,7 +3639,7 @@ func (x *ServiceConstants) String() string { func (*ServiceConstants) ProtoMessage() {} func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3537,7 +3652,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } // A bucket. @@ -3556,7 +3671,9 @@ type Bucket struct { // If included in the metadata of an UpdateBucketRequest, the operation will // only be performed if the etag matches that of the bucket. Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The project which owns this bucket. + // Immutable. The project which owns this bucket, in the format of + // "projects/". + // can be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` // Output only. The metadata generation of this bucket. // Attempting to set or update this field will result in a @@ -3599,14 +3716,14 @@ type Bucket struct { // Output only. The creation time of the bucket. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamp.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] // (CORS) config. Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` // Output only. The modification time of the bucket. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamp.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The default value for event-based hold on newly created objects in this // bucket. 
Event-based hold is a way to retain objects indefinitely until an // event occurs, signified by the @@ -3664,7 +3781,7 @@ type Bucket struct { func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3677,7 +3794,7 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3690,7 +3807,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *Bucket) GetName() string { @@ -3777,7 +3894,7 @@ func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { return nil } -func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { +func (x *Bucket) GetCreateTime() *timestamp.Timestamp { if x != nil { return x.CreateTime } @@ -3791,7 +3908,7 @@ func (x *Bucket) GetCors() []*Bucket_Cors { return nil } -func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { +func (x *Bucket) GetUpdateTime() *timestamp.Timestamp { if x != nil { return x.UpdateTime } @@ -3933,7 +4050,7 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3946,7 +4063,7 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3959,7 +4076,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. 
func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} } func (x *BucketAccessControl) GetRole() string { @@ -4034,7 +4151,7 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4047,7 +4164,7 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4060,7 +4177,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} } func (x *ChecksummedData) GetContent() []byte { @@ -4101,7 +4218,7 @@ type ObjectChecksums struct { func (x *ObjectChecksums) Reset() { *x = ObjectChecksums{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4114,7 +4231,7 @@ func (x *ObjectChecksums) String() string { func (*ObjectChecksums) ProtoMessage() {} func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4127,7 +4244,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} } func (x *ObjectChecksums) GetCrc32C() uint32 { @@ -4150,20 +4267,25 @@ type HmacKeyMetadata struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Resource name ID of the key in the format /. + // Immutable. Resource name ID of the key in the format + // /. + // can be the project ID or project number. Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Globally unique id for keys. + // Immutable. Globally unique id for keys. AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // The project ID that the hmac key is contained in. + // Immutable. Identifies the project that owns the service account of the specified HMAC + // key, in the format "projects/". can + // be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Email of the service account the key authenticates as. + // Output only. Email of the service account the key authenticates as. 
ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` // State of the key. One of ACTIVE, INACTIVE, or DELETED. + // Writable, can be updated by UpdateHmacKey operation. State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - // The creation time of the HMAC key. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // The last modification time of the HMAC key metadata. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // Output only. The creation time of the HMAC key. + CreateTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Output only. The last modification time of the HMAC key metadata. + UpdateTime *timestamp.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The etag of the HMAC key. Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -4171,7 +4293,7 @@ type HmacKeyMetadata struct { func (x *HmacKeyMetadata) Reset() { *x = HmacKeyMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4184,7 +4306,7 @@ func (x *HmacKeyMetadata) String() string { func (*HmacKeyMetadata) ProtoMessage() {} func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4197,7 +4319,7 @@ func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. 
func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} } func (x *HmacKeyMetadata) GetId() string { @@ -4235,14 +4357,14 @@ func (x *HmacKeyMetadata) GetState() string { return "" } -func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { +func (x *HmacKeyMetadata) GetCreateTime() *timestamp.Timestamp { if x != nil { return x.CreateTime } return nil } -func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { +func (x *HmacKeyMetadata) GetUpdateTime() *timestamp.Timestamp { if x != nil { return x.UpdateTime } @@ -4290,7 +4412,7 @@ type Notification struct { func (x *Notification) Reset() { *x = Notification{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4303,7 +4425,7 @@ func (x *Notification) String() string { func (*Notification) ProtoMessage() {} func (x *Notification) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4316,7 +4438,7 @@ func (x *Notification) ProtoReflect() protoreflect.Message { // Deprecated: Use Notification.ProtoReflect.Descriptor instead. func (*Notification) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} } func (x *Notification) GetName() string { @@ -4385,7 +4507,7 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4398,7 +4520,7 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4411,7 +4533,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -4489,7 +4611,7 @@ type Object struct { // version of the object has been deleted. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. 
- DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + DeleteTime *timestamp.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. // If an object is stored without a Content-Type, it is served as @@ -4498,7 +4620,7 @@ type Object struct { // Output only. The creation time of the object. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamp.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Number of underlying components that make up this object. Components are // accumulated by compose operations. // Attempting to set or update this field will result in a @@ -4507,9 +4629,14 @@ type Object struct { // Output only. Hashes for the data part of this object. Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` // Output only. The modification time of the object metadata. + // Set initially to object creation time and then updated whenever any + // metadata of the object changes. This includes changes made by a requester, + // such as modifying custom metadata, as well as changes made by Cloud Storage + // on behalf of a requester, such as changing the storage class based on an + // Object Lifecycle Configuration. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamp.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // Cloud KMS Key used to encrypt this object, if the object is encrypted by // such a key. KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` @@ -4517,7 +4644,7 @@ type Object struct { // object is initially created, it will be set to time_created. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` + UpdateStorageClassTime *timestamp.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` // Whether an object is under temporary hold. While this flag is set to true, // the object is protected against deletion and overwrites. A common use case // of this flag is regulatory investigations where objects need to be retained @@ -4531,7 +4658,7 @@ type Object struct { // Note 2: This value can be provided even when temporary hold is set (so that // the user can reason about policy without having to first unset the // temporary hold). 
- RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` + RetentionExpireTime *timestamp.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` // User-provided metadata, in key/value pairs. Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Whether an object is under event-based hold. @@ -4554,13 +4681,13 @@ type Object struct { // such a key. CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` // A user-specified timestamp set on an object. - CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` + CustomTime *timestamp.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` } func (x *Object) Reset() { *x = Object{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4573,7 +4700,7 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4586,7 +4713,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. 
func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} } func (x *Object) GetName() string { @@ -4673,7 +4800,7 @@ func (x *Object) GetContentLanguage() string { return "" } -func (x *Object) GetDeleteTime() *timestamppb.Timestamp { +func (x *Object) GetDeleteTime() *timestamp.Timestamp { if x != nil { return x.DeleteTime } @@ -4687,7 +4814,7 @@ func (x *Object) GetContentType() string { return "" } -func (x *Object) GetCreateTime() *timestamppb.Timestamp { +func (x *Object) GetCreateTime() *timestamp.Timestamp { if x != nil { return x.CreateTime } @@ -4708,7 +4835,7 @@ func (x *Object) GetChecksums() *ObjectChecksums { return nil } -func (x *Object) GetUpdateTime() *timestamppb.Timestamp { +func (x *Object) GetUpdateTime() *timestamp.Timestamp { if x != nil { return x.UpdateTime } @@ -4722,7 +4849,7 @@ func (x *Object) GetKmsKey() string { return "" } -func (x *Object) GetUpdateStorageClassTime() *timestamppb.Timestamp { +func (x *Object) GetUpdateStorageClassTime() *timestamp.Timestamp { if x != nil { return x.UpdateStorageClassTime } @@ -4736,7 +4863,7 @@ func (x *Object) GetTemporaryHold() bool { return false } -func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { +func (x *Object) GetRetentionExpireTime() *timestamp.Timestamp { if x != nil { return x.RetentionExpireTime } @@ -4771,7 +4898,7 @@ func (x *Object) GetCustomerEncryption() *CustomerEncryption { return nil } -func (x *Object) GetCustomTime() *timestamppb.Timestamp { +func (x *Object) GetCustomTime() *timestamp.Timestamp { if x != nil { return x.CustomTime } @@ -4822,7 +4949,7 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4835,7 +4962,7 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4848,7 +4975,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. 
func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} } func (x *ObjectAccessControl) GetRole() string { @@ -4926,7 +5053,7 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4939,7 +5066,7 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4952,7 +5079,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -4991,7 +5118,7 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5004,7 +5131,7 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5017,7 +5144,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5049,7 +5176,7 @@ type ServiceAccount struct { func (x *ServiceAccount) Reset() { *x = ServiceAccount{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5062,7 +5189,7 @@ func (x *ServiceAccount) String() string { func (*ServiceAccount) ProtoMessage() {} func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5075,7 +5202,7 @@ func (x *ServiceAccount) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. 
func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} } func (x *ServiceAccount) GetEmailAddress() string { @@ -5100,7 +5227,7 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5113,7 +5240,7 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5126,7 +5253,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} } func (x *Owner) GetEntity() string { @@ -5160,7 +5287,7 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5173,7 +5300,7 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5186,7 +5313,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. 
func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} } func (x *ContentRange) GetStart() int64 { @@ -5228,7 +5355,7 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5241,7 +5368,7 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5293,7 +5420,7 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5306,7 +5433,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5342,7 +5469,7 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5355,7 +5482,7 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5368,7 +5495,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. 
func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -5408,7 +5535,7 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5421,7 +5548,7 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5434,7 +5561,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -5479,7 +5606,7 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5492,7 +5619,7 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5505,7 +5632,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 2} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -5531,7 +5658,7 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5544,7 +5671,7 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5557,7 +5684,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 3} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -5589,7 +5716,7 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5602,7 +5729,7 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5615,7 +5742,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 4} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -5641,7 +5768,7 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5654,7 +5781,7 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5667,7 +5794,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 5} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} } func (x *Bucket_Logging) GetLogBucket() string { @@ -5692,7 +5819,7 @@ type Bucket_RetentionPolicy struct { // Server-determined value that indicates the time from which policy was // enforced and effective. - EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` + EffectiveTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` // Once locked, an object retention policy cannot be modified. IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` // The duration in seconds that objects need to be retained. 
Retention @@ -5705,7 +5832,7 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5718,7 +5845,7 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5731,10 +5858,10 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 6} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} } -func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { +func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamp.Timestamp { if x != nil { return x.EffectiveTime } @@ -5770,7 +5897,7 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5783,7 +5910,7 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5796,7 +5923,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 7} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} } func (x *Bucket_Versioning) GetEnabled() bool { @@ -5830,7 +5957,7 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5843,7 +5970,7 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5856,7 +5983,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. 
func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 8} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -5888,7 +6015,7 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5901,7 +6028,7 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5914,7 +6041,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 9} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9} } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -5936,13 +6063,13 @@ type Bucket_Autoclass struct { // disabled/unconfigured or set to false after being enabled. If Autoclass // is enabled when the bucket is created, the toggle_time is set to the // bucket creation time. - ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + ToggleTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` } func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5955,7 +6082,7 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5968,7 +6095,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. 
func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 10} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10} } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -5978,7 +6105,7 @@ func (x *Bucket_Autoclass) GetEnabled() bool { return false } -func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { +func (x *Bucket_Autoclass) GetToggleTime() *timestamp.Timestamp { if x != nil { return x.ToggleTime } @@ -5997,13 +6124,13 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { // The deadline time for changing // `iamConfig.uniformBucketLevelAccess.enabled` from `true` to `false`. // Mutable until the specified deadline is reached, but not afterward. - LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` + LockTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` } func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6016,7 +6143,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6029,7 +6156,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 3, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0} } func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { @@ -6039,7 +6166,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { return false } -func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamp.Timestamp { if x != nil { return x.LockTime } @@ -6062,7 +6189,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6075,7 +6202,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6088,7 +6215,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 4, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0} } func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { @@ -6122,7 +6249,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6135,7 +6262,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6148,7 +6275,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 4, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0} } func (x *Bucket_Lifecycle_Rule_Action) GetType() string { @@ -6220,7 +6347,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6233,7 +6360,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6246,7 +6373,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38, 4, 0, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1} } func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { @@ -6337,1092 +6464,1103 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, - 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 
0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, - 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, - 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, - 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 
0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, - 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, - 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xa6, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, - 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, - 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 
0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, - 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x22, 0xac, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 
0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 
0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x22, 0xa6, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, + 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, - 0x41, 0x02, 0xfa, 
0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, - 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, + 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, + 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xac, 0x03, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, + 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x41, 0x63, 0x6c, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 
0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, + 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x0a, - 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 
0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0xf4, 0x06, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, + 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, + 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf4, 0x06, 0x0a, 0x14, + 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, + 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, + 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, + 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, + 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 
0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, + 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, + 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, + 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 
0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, - 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, - 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, - 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, 0x10, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, + 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 
0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xdd, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 
0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, - 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, - 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, - 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 
0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, - 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, - 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, - 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, - 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 
0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, - 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, - 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, - 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, - 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, - 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, - 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x35, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, + 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, - 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, - 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xd1, 0x03, 0x0a, 0x0f, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x35, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 
0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, - 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, - 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, - 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, + 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 
0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, - 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xf8, 0x03, - 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, - 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, - 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, - 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, - 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, - 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, - 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x22, 0xc9, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, - 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, - 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 
0x65, 0x72, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, - 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, - 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, - 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, - 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xbe, 0x0d, 0x0a, 0x14, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, - 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, - 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, - 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, - 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, - 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, - 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, - 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, - 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, - 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, - 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, + 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, + 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, + 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 
0x65, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xc9, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, + 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, + 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, + 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, + 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, + 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, + 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xbe, + 0x0d, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, + 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, + 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, + 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, - 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, - 0x65, 0x63, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, + 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, + 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, + 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, + 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, + 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x22, 0xfd, 0x04, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, + 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, - 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0xfd, - 0x04, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, - 0x63, 0x6c, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, - 
0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, - 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 
0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, + 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, + 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, + 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, + 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 
0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x94, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2f, 0x0a, 0x11, 0x73, 0x68, 0x6f, + 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, - 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, - 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, - 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, - 0x94, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, - 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x0a, 0x15, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, - 
0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2f, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, - 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, - 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, - 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, - 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, - 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 
0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, - 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, - 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, - 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, - 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, - 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, - 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, - 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, + 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, + 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, + 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, + 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, + 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, + 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, + 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, + 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 
0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, + 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, + 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, + 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, - 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, - 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, - 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, - 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, - 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, - 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, - 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, - 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, - 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, - 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, - 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, - 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, - 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, - 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, - 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, - 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, - 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, - 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, - 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, - 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, - 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, - 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, - 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, - 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, - 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, - 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 
0x12, 0x1e, 0x0a, 0x1a, - 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, - 0x22, 0x8c, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, - 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, - 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, + 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, + 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, + 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, + 
0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, + 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, + 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, + 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, + 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, + 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, + 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, + 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, + 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, + 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, + 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, + 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, + 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, + 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, + 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, + 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, + 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, + 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, + 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, + 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, + 0x1a, 0x02, 0x10, 0x01, 0x22, 0x8c, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, + 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, + 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, + 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, + 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, + 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, + 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, + 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, + 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 
0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, + 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, + 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, + 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, - 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, - 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, - 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, + 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, + 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, - 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, - 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, - 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, - 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, - 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, - 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, - 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, - 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, - 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, + 
0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, + 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, + 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, + 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, - 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, - 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, - 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, - 0x75, 0x74, 0x6f, 0x63, 
0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0x30, - 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, - 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, - 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, - 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, - 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, - 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, - 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, - 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, - 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, - 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, - 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, - 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, - 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, - 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, - 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, - 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, - 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, - 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, - 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 
0x63, 0x6c, 0x61, 0x73, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, - 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, - 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, - 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, - 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, - 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, - 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, - 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, - 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, - 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, - 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x1a, 0x9c, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, - 
0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, - 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, - 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, + 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, + 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, + 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, + 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, + 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, + 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, + 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, + 0x69, 0x66, 0x6f, 0x72, 
0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, - 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, - 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, - 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, - 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, - 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, - 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 
0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, - 0xf3, 0x01, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, - 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, - 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, - 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, - 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, - 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, - 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, - 0x22, 0xe2, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, - 0x64, 0x12, 0x4a, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x30, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x32, 0x0a, - 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, - 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, + 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, + 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, + 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, + 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, + 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, + 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, + 
0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, + 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, + 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, + 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, + 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, + 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, + 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, + 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, + 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, + 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, + 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 
0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, + 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, + 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, + 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, + 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, + 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, + 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x1a, 0x9c, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, + 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, + 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, + 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 
0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, + 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, + 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, + 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x7d, 0x22, 0xf3, 0x01, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, + 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, + 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 
0x01, 0x01, 0x12, 0x19, + 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, + 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, + 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, + 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, + 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, + 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xfb, 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, @@ -7600,241 +7738,315 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xb0, 0x1b, 0x0a, - 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xda, 0x24, 0x0a, + 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 
0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x07, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x54, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, + 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, + 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x07, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6d, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x1a, 0xda, 0x41, 0x17, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x67, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x09, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x76, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x22, 0x09, 0xda, 0x41, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0x0b, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x5d, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x12, 0xda, 0x41, 0x0f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x82, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6d, 0x0a, + 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x1a, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x67, 0x0a, 0x0b, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x09, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 
0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, + 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0xda, 0x41, 0x14, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x15, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x63, 0x0a, - 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x07, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x66, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x07, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7b, 0x0a, 0x12, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x16, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x79, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x09, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x55, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, + 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x0c, 0x44, 0x65, 0x6c, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x12, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x96, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, + 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2b, 0xda, 0x41, 0x0d, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x22, 0x2b, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x88, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, - 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, - 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x0c, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 
0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x15, - 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, + 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, + 0xc7, 0x01, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, + 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x67, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 
0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, + 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x09, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0x5e, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x76, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0c, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x12, 0x6f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 
0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x0a, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x84, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, + 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, + 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, + 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x6f, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0x0a, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x84, + 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x20, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0xda, 0x41, 0x1d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x14, 0xda, - 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x6c, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x14, 0xda, 0x41, 0x11, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x6b, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x0a, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x75, - 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x17, 0xda, 0x41, - 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x14, 
0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x6c, 0x0a, + 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x14, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x6b, 0x0a, 0x0c, 0x4c, + 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0a, 0xda, 0x41, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x75, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x17, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, + 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, + 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, + 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 
0x68, 0x2f, 0x64, + 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, + 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, - 0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, - 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 
0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, + 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, + 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, + 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -7850,7 +8062,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 72) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -7867,169 +8079,171 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*ListNotificationsResponse)(nil), // 12: google.storage.v2.ListNotificationsResponse (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*ReadObjectRequest)(nil), // 15: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 16: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 17: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 18: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 19: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 20: google.storage.v2.WriteObjectResponse - (*ListObjectsRequest)(nil), // 21: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 22: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 23: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 24: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 25: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 26: google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 27: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 28: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 29: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 30: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 31: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 32: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 33: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 34: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 35: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 36: google.storage.v2.UpdateHmacKeyRequest - 
(*CommonObjectRequestParams)(nil), // 37: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 38: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 39: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 40: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 41: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 42: google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 43: google.storage.v2.HmacKeyMetadata - (*Notification)(nil), // 44: google.storage.v2.Notification - (*CustomerEncryption)(nil), // 45: google.storage.v2.CustomerEncryption - (*Object)(nil), // 46: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 47: google.storage.v2.ObjectAccessControl - (*ListObjectsResponse)(nil), // 48: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 49: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 50: google.storage.v2.ServiceAccount - (*Owner)(nil), // 51: google.storage.v2.Owner - (*ContentRange)(nil), // 52: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 53: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 54: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 55: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 56: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 57: google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 58: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 59: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 60: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 61: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_Versioning)(nil), // 62: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 63: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 64: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 65: google.storage.v2.Bucket.Autoclass - nil, // 66: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 67: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 68: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 69: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 71: google.storage.v2.Notification.CustomAttributesEntry - nil, // 72: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 73: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 74: google.protobuf.Timestamp - (*date.Date)(nil), // 75: google.type.Date - (*v1.GetIamPolicyRequest)(nil), // 76: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 77: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 78: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 79: google.protobuf.Empty - (*v1.Policy)(nil), // 80: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 81: google.iam.v1.TestIamPermissionsResponse + (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 17: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), 
// 18: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse + (*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 28: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 41: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata + (*Notification)(nil), // 46: google.storage.v2.Notification + (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption + (*Object)(nil), // 48: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount + (*Owner)(nil), // 53: google.storage.v2.Owner + (*ContentRange)(nil), // 54: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_Versioning)(nil), // 64: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website + 
(*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass + nil, // 68: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 73: google.storage.v2.Notification.CustomAttributesEntry + nil, // 74: google.storage.v2.Object.MetadataEntry + (*field_mask.FieldMask)(nil), // 75: google.protobuf.FieldMask + (*timestamp.Timestamp)(nil), // 76: google.protobuf.Timestamp + (*date.Date)(nil), // 77: google.type.Date + (*v1.GetIamPolicyRequest)(nil), // 78: google.iam.v1.GetIamPolicyRequest + (*v1.SetIamPolicyRequest)(nil), // 79: google.iam.v1.SetIamPolicyRequest + (*v1.TestIamPermissionsRequest)(nil), // 80: google.iam.v1.TestIamPermissionsRequest + (*empty.Empty)(nil), // 81: google.protobuf.Empty + (*v1.Policy)(nil), // 82: google.iam.v1.Policy + (*v1.TestIamPermissionsResponse)(nil), // 83: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 73, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 73, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 39, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 73, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 44, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification - 44, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification - 46, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 53, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 37, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 37, // 11: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 37, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 73, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 37, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 73, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 42, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 52, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 46, // 19: 
google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object - 46, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 18, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 41, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 42, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 37, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 46, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 73, // 26: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 37, // 27: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 46, // 28: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 46, // 29: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 37, // 30: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 46, // 31: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 18, // 32: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 37, // 33: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 46, // 34: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 73, // 35: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask - 37, // 36: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 43, // 37: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 43, // 38: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 43, // 39: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 73, // 40: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 40, // 41: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 47, // 42: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 59, // 43: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 74, // 44: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 56, // 45: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 74, // 46: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 66, // 47: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 63, // 48: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 62, // 49: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 60, // 50: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging - 51, // 51: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 57, // 52: google.storage.v2.Bucket.encryption:type_name -> 
google.storage.v2.Bucket.Encryption - 55, // 53: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 61, // 54: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 58, // 55: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig - 64, // 56: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 65, // 57: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 49, // 58: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 74, // 59: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 74, // 60: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 71, // 61: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry - 47, // 62: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 74, // 63: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 74, // 64: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 42, // 65: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 74, // 66: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 74, // 67: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 74, // 68: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 72, // 69: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 51, // 70: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 45, // 71: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 74, // 72: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 49, // 73: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 46, // 74: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 54, // 75: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 67, // 76: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 68, // 77: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 74, // 78: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 74, // 79: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 74, // 80: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 69, // 81: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 70, // 82: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 75, // 83: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 75, // 84: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 75, // 85: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> 
google.type.Date + 75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 46, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification + 46, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification + 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 39, // 11: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 39, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 75, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 39, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 75, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 43, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 44, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 54, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 48, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 48, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 20, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 43, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 44, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 39, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 75, // 26: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 39, // 27: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 28: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 48, // 29: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 39, // 30: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 31: google.storage.v2.RewriteResponse.resource:type_name -> 
google.storage.v2.Object + 20, // 32: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 39, // 33: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 34: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 75, // 35: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 39, // 36: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 45, // 37: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 45, // 38: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 45, // 39: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 75, // 40: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 41: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 49, // 42: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 61, // 43: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 76, // 44: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 58, // 45: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 76, // 46: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 68, // 47: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 65, // 48: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 64, // 49: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 62, // 50: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 53, // 51: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 59, // 52: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 57, // 53: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 63, // 54: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 60, // 55: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 66, // 56: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 67, // 57: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 51, // 58: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 76, // 59: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 76, // 60: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 73, // 61: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry + 49, // 62: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 76, // 63: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 76, // 64: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 44, // 65: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 76, // 66: google.storage.v2.Object.update_time:type_name -> 
google.protobuf.Timestamp + 76, // 67: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 76, // 68: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 74, // 69: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 53, // 70: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 47, // 71: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 76, // 72: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 51, // 73: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 48, // 74: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 56, // 75: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 69, // 76: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 70, // 77: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 76, // 78: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 76, // 79: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 76, // 80: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 71, // 81: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 72, // 82: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 77, // 83: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 77, // 84: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 77, // 85: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date 1, // 86: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest 2, // 87: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest 3, // 88: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest 4, // 89: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest 6, // 90: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 76, // 91: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 77, // 92: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 78, // 93: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 78, // 91: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 79, // 92: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 80, // 93: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest 7, // 94: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest 8, // 95: google.storage.v2.Storage.DeleteNotification:input_type -> google.storage.v2.DeleteNotificationRequest 9, // 96: 
google.storage.v2.Storage.GetNotification:input_type -> google.storage.v2.GetNotificationRequest @@ -8037,51 +8251,53 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 11, // 98: google.storage.v2.Storage.ListNotifications:input_type -> google.storage.v2.ListNotificationsRequest 13, // 99: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest 14, // 100: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 16, // 101: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 15, // 102: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 28, // 103: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 19, // 104: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 21, // 105: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 24, // 106: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 26, // 107: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 22, // 108: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 29, // 109: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 30, // 110: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 32, // 111: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 33, // 112: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 34, // 113: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 36, // 114: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 79, // 115: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 39, // 116: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 39, // 117: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 118: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 39, // 119: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 80, // 120: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 80, // 121: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 81, // 122: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 39, // 123: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 79, // 124: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty - 44, // 125: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification - 44, // 126: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification - 12, // 127: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse - 46, // 128: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 79, // 129: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 46, // 130: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object 
- 17, // 131: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 46, // 132: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 20, // 133: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 48, // 134: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 25, // 135: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 27, // 136: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 23, // 137: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 50, // 138: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 31, // 139: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 79, // 140: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 43, // 141: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 35, // 142: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 43, // 143: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 115, // [115:144] is the sub-list for method output_type - 86, // [86:115] is the sub-list for method input_type + 15, // 101: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 18, // 102: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 17, // 103: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 30, // 104: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 21, // 105: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 23, // 106: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 26, // 107: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 28, // 108: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 24, // 109: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 31, // 110: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 32, // 111: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 34, // 112: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 35, // 113: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 36, // 114: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 38, // 115: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 81, // 116: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 41, // 117: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 41, // 118: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 119: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 41, // 120: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> 
google.storage.v2.Bucket + 82, // 121: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 82, // 122: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 83, // 123: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 41, // 124: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 81, // 125: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty + 46, // 126: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification + 46, // 127: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification + 12, // 128: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse + 48, // 129: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 81, // 130: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 16, // 131: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 48, // 132: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 19, // 133: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 48, // 134: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 22, // 135: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 50, // 136: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 27, // 137: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 29, // 138: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 25, // 139: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 52, // 140: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 33, // 141: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 81, // 142: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 45, // 143: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 37, // 144: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 45, // 145: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 116, // [116:146] is the sub-list for method output_type + 86, // [86:116] is the sub-list for method input_type 86, // [86:86] is the sub-list for extension type_name 86, // [86:86] is the sub-list for extension extendee 0, // [0:86] is the sub-list for field type_name @@ -8262,7 +8478,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectRequest); i { + switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8274,7 +8490,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetObjectRequest); i { + switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8286,7 +8502,7 @@ func file_google_storage_v2_storage_proto_init() { } } 
file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectResponse); i { + switch v := v.(*ReadObjectRequest); i { case 0: return &v.state case 1: @@ -8298,7 +8514,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectSpec); i { + switch v := v.(*GetObjectRequest); i { case 0: return &v.state case 1: @@ -8310,7 +8526,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectRequest); i { + switch v := v.(*ReadObjectResponse); i { case 0: return &v.state case 1: @@ -8322,7 +8538,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectResponse); i { + switch v := v.(*WriteObjectSpec); i { case 0: return &v.state case 1: @@ -8334,7 +8550,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsRequest); i { + switch v := v.(*WriteObjectRequest); i { case 0: return &v.state case 1: @@ -8346,7 +8562,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusRequest); i { + switch v := v.(*WriteObjectResponse); i { case 0: return &v.state case 1: @@ -8358,7 +8574,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusResponse); i { + switch v := v.(*ListObjectsRequest); i { case 0: return &v.state case 1: @@ -8370,7 +8586,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteObjectRequest); i { + switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state case 1: @@ -8382,7 +8598,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteResponse); i { + switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state case 1: @@ -8394,7 +8610,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteRequest); i { + switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state case 1: @@ -8406,7 +8622,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteResponse); i { + switch v := v.(*RewriteResponse); i { case 0: return &v.state case 1: @@ -8418,7 +8634,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateObjectRequest); i { + switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8430,7 +8646,7 @@ func 
file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceAccountRequest); i { + switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8442,7 +8658,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyRequest); i { + switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state case 1: @@ -8454,7 +8670,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyResponse); i { + switch v := v.(*GetServiceAccountRequest); i { case 0: return &v.state case 1: @@ -8466,7 +8682,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteHmacKeyRequest); i { + switch v := v.(*CreateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8478,7 +8694,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetHmacKeyRequest); i { + switch v := v.(*CreateHmacKeyResponse); i { case 0: return &v.state case 1: @@ -8490,7 +8706,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysRequest); i { + switch v := v.(*DeleteHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8502,7 +8718,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysResponse); i { + switch v := v.(*GetHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8514,7 +8730,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateHmacKeyRequest); i { + switch v := v.(*ListHmacKeysRequest); i { case 0: return &v.state case 1: @@ -8526,7 +8742,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CommonObjectRequestParams); i { + switch v := v.(*ListHmacKeysResponse); i { case 0: return &v.state case 1: @@ -8538,7 +8754,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConstants); i { + switch v := v.(*UpdateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8550,7 +8766,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { + switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state case 1: @@ -8562,7 +8778,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketAccessControl); i { + switch v := v.(*ServiceConstants); i { case 
0: return &v.state case 1: @@ -8574,7 +8790,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChecksummedData); i { + switch v := v.(*Bucket); i { case 0: return &v.state case 1: @@ -8586,7 +8802,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectChecksums); i { + switch v := v.(*BucketAccessControl); i { case 0: return &v.state case 1: @@ -8598,7 +8814,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HmacKeyMetadata); i { + switch v := v.(*ChecksummedData); i { case 0: return &v.state case 1: @@ -8610,7 +8826,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Notification); i { + switch v := v.(*ObjectChecksums); i { case 0: return &v.state case 1: @@ -8622,7 +8838,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomerEncryption); i { + switch v := v.(*HmacKeyMetadata); i { case 0: return &v.state case 1: @@ -8634,7 +8850,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { + switch v := v.(*Notification); i { case 0: return &v.state case 1: @@ -8646,7 +8862,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectAccessControl); i { + switch v := v.(*CustomerEncryption); i { case 0: return &v.state case 1: @@ -8658,7 +8874,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsResponse); i { + switch v := v.(*Object); i { case 0: return &v.state case 1: @@ -8670,7 +8886,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProjectTeam); i { + switch v := v.(*ObjectAccessControl); i { case 0: return &v.state case 1: @@ -8682,7 +8898,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceAccount); i { + switch v := v.(*ListObjectsResponse); i { case 0: return &v.state case 1: @@ -8694,7 +8910,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Owner); i { + switch v := v.(*ProjectTeam); i { case 0: return &v.state case 1: @@ -8706,7 +8922,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ContentRange); i { + switch v := v.(*ServiceAccount); i { case 0: return &v.state case 1: @@ -8718,7 +8934,7 @@ func 
file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject); i { + switch v := v.(*Owner); i { case 0: return &v.state case 1: @@ -8730,7 +8946,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + switch v := v.(*ContentRange); i { case 0: return &v.state case 1: @@ -8742,7 +8958,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Billing); i { + switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state case 1: @@ -8754,7 +8970,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Cors); i { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state case 1: @@ -8766,7 +8982,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Encryption); i { + switch v := v.(*Bucket_Billing); i { case 0: return &v.state case 1: @@ -8778,7 +8994,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig); i { + switch v := v.(*Bucket_Cors); i { case 0: return &v.state case 1: @@ -8790,7 +9006,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle); i { + switch v := v.(*Bucket_Encryption); i { case 0: return &v.state case 1: @@ -8802,7 +9018,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Logging); i { + switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state case 1: @@ -8814,7 +9030,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_RetentionPolicy); i { + switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state case 1: @@ -8826,7 +9042,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Versioning); i { + switch v := v.(*Bucket_Logging); i { case 0: return &v.state case 1: @@ -8838,7 +9054,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Website); i { + switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state case 1: @@ -8850,7 +9066,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_CustomPlacementConfig); i { + switch v := v.(*Bucket_Versioning); 
i { case 0: return &v.state case 1: @@ -8862,7 +9078,19 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Autoclass); i { + switch v := v.(*Bucket_Website); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state case 1: @@ -8874,6 +9102,18 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { case 0: return &v.state @@ -8885,7 +9125,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule); i { case 0: return &v.state @@ -8897,7 +9137,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -8909,7 +9149,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -8928,37 +9168,37 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - 
file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[40].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[41].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[53].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[69].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 72, + NumMessages: 74, NumExtensions: 0, NumServices: 1, }, @@ -8986,7 +9226,7 @@ const _ = grpc.SupportPackageIsVersion6 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type StorageClient interface { // Permanently deletes an empty bucket. - DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) // Returns metadata for the specified bucket. GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) // Creates a new bucket. @@ -8995,17 +9235,17 @@ type StorageClient interface { ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) // Locks retention policy on a bucket. LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) - // Gets the IAM policy for a specified bucket. + // Gets the IAM policy for a specified bucket or object. GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) - // Updates an IAM policy for the specified bucket. + // Updates an IAM policy for the specified bucket or object. SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) - // Tests a set of permissions on the given bucket to see which, if + // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. 
TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) // Permanently deletes a notification subscription. - DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) // View a notification config. GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) // Creates a notification subscription for a given bucket. @@ -9019,9 +9259,10 @@ type StorageClient interface { // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter - // is used. - DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // is not enabled for the bucket, or if the `generation` parameter is used. + DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Cancels an in-progress resumable upload. + CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) // Reads an object's data. @@ -9110,7 +9351,7 @@ type StorageClient interface { // Creates a new HMAC key for the given service account. CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) // Gets an existing HMAC key metadata for the given id. GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) // Lists HMAC keys under a given project with the additional filters provided. @@ -9127,8 +9368,8 @@ func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { return &storageClient{cc} } -func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) 
if err != nil { return nil, err @@ -9208,8 +9449,8 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques return out, nil } -func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) if err != nil { return nil, err @@ -9253,8 +9494,8 @@ func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequ return out, nil } -func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) if err != nil { return nil, err @@ -9262,6 +9503,15 @@ func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectReques return out, nil } +func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { + out := new(CancelResumableWriteResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storageClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetObject", in, out, opts...) @@ -9400,8 +9650,8 @@ func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequ return out, nil } -func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) +func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) if err != nil { return nil, err @@ -9439,7 +9689,7 @@ func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequ // StorageServer is the server API for Storage service. type StorageServer interface { // Permanently deletes an empty bucket. - DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) + DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) // Returns metadata for the specified bucket. GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) // Creates a new bucket. @@ -9448,17 +9698,17 @@ type StorageServer interface { ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) // Locks retention policy on a bucket. LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) - // Gets the IAM policy for a specified bucket. + // Gets the IAM policy for a specified bucket or object. GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) - // Updates an IAM policy for the specified bucket. + // Updates an IAM policy for the specified bucket or object. 
SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) - // Tests a set of permissions on the given bucket to see which, if + // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) // Permanently deletes a notification subscription. - DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) + DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) // View a notification config. GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) // Creates a notification subscription for a given bucket. @@ -9472,9 +9722,10 @@ type StorageServer interface { // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter - // is used. - DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // is not enabled for the bucket, or if the `generation` parameter is used. + DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) + // Cancels an in-progress resumable upload. + CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. GetObject(context.Context, *GetObjectRequest) (*Object, error) // Reads an object's data. @@ -9563,7 +9814,7 @@ type StorageServer interface { // Creates a new HMAC key for the given service account. CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) + DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) // Gets an existing HMAC key metadata for the given id. GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) // Lists HMAC keys under a given project with the additional filters provided. 
@@ -9576,7 +9827,7 @@ type StorageServer interface { type UnimplementedStorageServer struct { } -func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { +func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") } func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { @@ -9603,7 +9854,7 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestI func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { +func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") } func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { @@ -9618,9 +9869,12 @@ func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotif func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") } -func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") } +func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") +} func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") } @@ -9651,7 +9905,7 @@ func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServic func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") } -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { +func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") } func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { @@ -9938,6 +10192,24 @@ func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelResumableWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(StorageServer).CancelResumableWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/CancelResumableWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).CancelResumableWrite(ctx, req.(*CancelResumableWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Storage_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetObjectRequest) if err := dec(in); err != nil { @@ -10265,6 +10537,10 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteObject", Handler: _Storage_DeleteObject_Handler, }, + { + MethodName: "CancelResumableWrite", + Handler: _Storage_CancelResumableWrite_Handler, + }, { MethodName: "GetObject", Handler: _Storage_GetObject_Handler, diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 08bddba7486..dc1cb9f6009 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.22.1" +const Version = "1.27.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index d0f0dd8d171..810d64285d0 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -57,7 +57,7 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten bo.Initial = retry.backoff.Initial bo.Max = retry.backoff.Max } - var errorFunc func(err error) bool = shouldRetry + var errorFunc func(err error) bool = ShouldRetry if retry.shouldRetry != nil { errorFunc = retry.shouldRetry } @@ -89,7 +89,16 @@ func setRetryHeaderGRPC(_ context.Context) func(string, int) { } } -func shouldRetry(err error) bool { +// ShouldRetry returns true if an error is retryable, based on best practice +// guidance from GCS. See +// https://cloud.google.com/storage/docs/retry-strategy#go for more information +// on what errors are considered retryable. +// +// If you would like to customize retryable errors, use the WithErrorFunc to +// supply a RetryOption to your library calls. For example, to retry additional +// errors, you can write a custom func that wraps ShouldRetry and also specifies +// additional errors that should return true. 
+func ShouldRetry(err error) bool { if err == nil { return false } @@ -131,7 +140,7 @@ func shouldRetry(err error) bool { } // Unwrap is only supported in go1.13.x+ if e, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(e.Unwrap()) + return ShouldRetry(e.Unwrap()) } return false } diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index dd43822b6a3..614feb7b6da 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,6 +21,7 @@ import ( "regexp" "cloud.google.com/go/internal/trace" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" raw "google.golang.org/api/storage/v1" ) @@ -91,6 +92,30 @@ func toNotification(rn *raw.Notification) *Notification { return n } +func toNotificationFromProto(pbn *storagepb.Notification) *Notification { + n := &Notification{ + ID: pbn.GetName(), + EventTypes: pbn.GetEventTypes(), + ObjectNamePrefix: pbn.GetObjectNamePrefix(), + CustomAttributes: pbn.GetCustomAttributes(), + PayloadFormat: pbn.GetPayloadFormat(), + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) + return n +} + +func toProtoNotification(n *Notification) *storagepb.Notification { + return &storagepb.Notification{ + Name: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: n.PayloadFormat, + } +} + var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") // parseNotificationTopic extracts the project and topic IDs from from the full @@ -132,21 +157,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re if n.TopicID == "" { return nil, errors.New("storage: AddNotification: missing TopicID") } - call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - var rn *raw.Notification - err = run(ctx, func() error { - rn, err = call.Context(ctx).Do() - return err - }, b.retry, false, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return toNotification(rn), nil + opts := makeStorageOpts(false, b.retry, b.userProject) + ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) + return ret, err } // Notifications returns all the Notifications configured for this bucket, as a map @@ -155,20 +169,9 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") defer func() { trace.EndSpan(ctx, err) }() - call := b.c.raw.Notifications.List(b.name) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - var res *raw.Notifications - err = run(ctx, func() error { - res, err = call.Context(ctx).Do() - return err - }, b.retry, true, setRetryHeaderHTTP(call)) - if err != nil { - return nil, err - } - return notificationsToMap(res.Items), nil + opts := makeStorageOpts(true, b.retry, b.userProject) + n, err = b.c.tc.ListNotifications(ctx, b.name, opts...) 
+ return n, err } func notificationsToMap(rns []*raw.Notification) map[string]*Notification { @@ -179,17 +182,19 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { return m } +func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, n := range ns { + m[n.Name] = toNotificationFromProto(n) + } + return m +} + // DeleteNotification deletes the notification with the given ID. func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") defer func() { trace.EndSpan(ctx, err) }() - call := b.c.raw.Notifications.Delete(b.name, id) - setClientHeader(call.Header()) - if b.userProject != "" { - call.UserProject(b.userProject) - } - return run(ctx, func() error { - return call.Context(ctx).Do() - }, b.retry, true, setRetryHeaderHTTP(call)) + opts := makeStorageOpts(true, b.retry, b.userProject) + return b.c.tc.DeleteNotification(ctx, b.name, id, opts...) } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index ab108b03939..46487d2b77d 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -16,20 +16,15 @@ package storage import ( "context" - "errors" "fmt" "hash/crc32" "io" "io/ioutil" "net/http" - "net/url" - "strconv" "strings" "time" "cloud.google.com/go/internal/trace" - "google.golang.org/api/googleapi" - storagepb "google.golang.org/genproto/googleapis/storage/v2" ) var crc32cTable = crc32.MakeTable(crc32.Castagnoli) @@ -95,10 +90,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() - if o.c.gc != nil { - return o.newRangeReaderWithGRPC(ctx, offset, length) - } - if err := o.validate(); err != nil { return nil, err } @@ -110,208 +101,31 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, err } } - u := &url.URL{ - Scheme: o.c.scheme, - Host: o.c.readHost, - Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), - } - verb := "GET" - if length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if o.userProject != "" { - req.Header.Set("X-Goog-User-Project", o.userProject) - } - if o.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { - return nil, err - } - - gen := o.gen - // Define a function that initiates a Read with offset and length, assuming we - // have already read seen bytes. - reopen := func(seen int64) (*http.Response, error) { - // If the context has already expired, return immediately without making a - // call. - if err := ctx.Err(); err != nil { - return nil, err - } - start := offset + seen - if length < 0 && start < 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if length < 0 && start > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if length > 0 { - // The end character isn't affected by how many bytes we've seen. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. 
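For context on the notifications refactor above (AddNotification, Notifications, and DeleteNotification now delegate to the transport-agnostic client): the public surface is unchanged, and TopicID is still required before the call is issued. A hedged usage sketch, with bucket, project, and topic names invented for illustration:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

func addNotification(ctx context.Context, client *storage.Client) (*storage.Notification, error) {
	// The handle rejects an empty TopicID up front; under the hood the topic is
	// addressed by its full Pub/Sub resource name (//pubsub.googleapis.com/projects/.../topics/...).
	return client.Bucket("example-bucket").AddNotification(ctx, &storage.Notification{
		TopicProjectID: "example-project",
		TopicID:        "example-topic",
		PayloadFormat:  storage.JSONPayload,
	})
}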
- if err := setConditionsHeaders(req.Header, o.conds); err != nil { - return nil, err - } - // If an object generation is specified, include generation as query string parameters. - if gen >= 0 { - req.URL.RawQuery = fmt.Sprintf("generation=%d", gen) - } + opts := makeStorageOpts(true, o.retry, o.userProject) - var res *http.Response - err = run(ctx, func() error { - res, err = o.c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. - if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - gen = gen64 - } - return nil - }, o.retry, true, setRetryHeaderHTTP(&readerRequestWrapper{req})) - if err != nil { - return nil, err - } - return res, nil + params := &newRangeReaderParams{ + bucket: o.bucket, + object: o.object, + gen: o.gen, + offset: offset, + length: length, + encryptionKey: o.encryptionKey, + conds: o.conds, + readCompressed: o.readCompressed, } - res, err := reopen(0) - if err != nil { - return nil, err - } - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. - ) - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - // Content range is formatted -/. We take - // the total size. - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } + r, err = o.c.tc.NewRangeReader(ctx, params, opts...) - dashIndex := strings.Index(cr, "-") - if dashIndex >= 0 { - startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err) - } - } - } else { - size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. 
- // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. - if length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } - } - - remain := res.ContentLength - body := res.Body - if length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var metaGen int64 - if res.Header.Get("X-Goog-Metageneration") != "" { - metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) - if err != nil { - return nil, err - } - } - - var lm time.Time - if res.Header.Get("Last-Modified") != "" { - lm, err = http.ParseTime(res.Header.Get("Last-Modified")) - if err != nil { - return nil, err - } - } - - attrs := ReaderObjectAttrs{ - Size: size, - ContentType: res.Header.Get("Content-Type"), - ContentEncoding: res.Header.Get("Content-Encoding"), - CacheControl: res.Header.Get("Cache-Control"), - LastModified: lm, - StartOffset: startOffset, - Generation: gen, - Metageneration: metaGen, - } - return &Reader{ - Attrs: attrs, - body: body, - size: size, - remain: remain, - wantCRC: crc, - checkCRC: checkCRC, - reopen: reopen, - }, nil + return r, err } // decompressiveTranscoding returns true if the request was served decompressed // and different than its original storage form. This happens when the "Content-Encoding" // header is "gzip". // See: -// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip -// * https://github.com/googleapis/google-cloud-go/issues/1800 +// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip +// - https://github.com/googleapis/google-cloud-go/issues/1800 func decompressiveTranscoding(res *http.Response) bool { // Decompressive Transcoding. return res.Header.Get("Content-Encoding") == "gzip" || @@ -376,42 +190,21 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) // is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. type Reader struct { Attrs ReaderObjectAttrs - body io.ReadCloser seen, remain, size int64 checkCRC bool // should we check the CRC? wantCRC uint32 // the CRC32c value the server sent in the header gotCRC uint32 // running crc - reopen func(seen int64) (*http.Response, error) - - // The following fields are only for use in the gRPC hybrid client. - stream storagepb.Storage_ReadObjectClient - reopenWithGRPC func(seen int64) (*readStreamResponse, context.CancelFunc, error) - leftovers []byte - cancelStream context.CancelFunc -} -type readStreamResponse struct { - stream storagepb.Storage_ReadObjectClient - response *storagepb.ReadObjectResponse + reader io.ReadCloser } // Close closes the Reader. It must be called when done reading. func (r *Reader) Close() error { - if r.body != nil { - return r.body.Close() - } - - r.closeStream() - return nil + return r.reader.Close() } func (r *Reader) Read(p []byte) (int, error) { - read := r.readWithRetry - if r.reopenWithGRPC != nil { - read = r.readWithGRPC - } - - n, err := read(p) + n, err := r.reader.Read(p) if r.remain != -1 { r.remain -= int64(n) } @@ -430,268 +223,6 @@ func (r *Reader) Read(p []byte) (int, error) { return n, err } -// newRangeReaderWithGRPC creates a new Reader with the given range that uses -// gRPC to read Object content. -// -// This is an experimental API and not intended for public use. 
-func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, length int64) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.newRangeReaderWithGRPC") - defer func() { trace.EndSpan(ctx, err) }() - - if o.c.gc == nil { - err = fmt.Errorf("handle doesn't have a gRPC client initialized") - return - } - if err = o.validate(); err != nil { - return - } - - // A negative length means "read to the end of the object", but the - // read_limit field it corresponds to uses zero to mean the same thing. Thus - // we coerce the length to 0 to read to the end of the object. - if length < 0 { - length = 0 - } - - // For now, there are only globally unique buckets, and "_" is the alias - // project ID for such buckets. - b := bucketResourceName("_", o.bucket) - req := &storagepb.ReadObjectRequest{ - Bucket: b, - Object: o.object, - } - // The default is a negative value, which means latest. - if o.gen >= 0 { - req.Generation = o.gen - } - - // Define a function that initiates a Read with offset and length, assuming - // we have already read seen bytes. - reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { - // If the context has already expired, return immediately without making - // we call. - if err := ctx.Err(); err != nil { - return nil, nil, err - } - - cc, cancel := context.WithCancel(ctx) - - start := offset + seen - // Only set a ReadLimit if length is greater than zero, because zero - // means read it all. - if length > 0 { - req.ReadLimit = length - seen - } - req.ReadOffset = start - - if err := applyCondsProto("reopenWithGRPC", o.gen, o.conds, req); err != nil { - cancel() - return nil, nil, err - } - - var stream storagepb.Storage_ReadObjectClient - var msg *storagepb.ReadObjectResponse - var err error - - err = run(cc, func() error { - stream, err = o.c.gc.ReadObject(cc, req) - if err != nil { - return err - } - - msg, err = stream.Recv() - - return err - }, o.retry, true, setRetryHeaderGRPC(ctx)) - if err != nil { - // Close the stream context we just created to ensure we don't leak - // resources. - cancel() - return nil, nil, err - } - - return &readStreamResponse{stream, msg}, cancel, nil - } - - res, cancel, err := reopen(0) - if err != nil { - return nil, err - } - - r = &Reader{ - stream: res.stream, - reopenWithGRPC: reopen, - cancelStream: cancel, - } - - // The first message was Recv'd on stream open, use it to populate the - // object metadata. - msg := res.response - obj := msg.GetMetadata() - // This is the size of the entire object, even if only a range was requested. - size := obj.GetSize() - - r.Attrs = ReaderObjectAttrs{ - Size: size, - ContentType: obj.GetContentType(), - ContentEncoding: obj.GetContentEncoding(), - CacheControl: obj.GetCacheControl(), - LastModified: obj.GetUpdateTime().AsTime(), - Metageneration: obj.GetMetageneration(), - Generation: obj.GetGeneration(), - } - - r.size = size - cr := msg.GetContentRange() - if cr != nil { - r.Attrs.StartOffset = cr.GetStart() - r.remain = cr.GetEnd() - cr.GetStart() + 1 - } else { - r.remain = size - } - - // Only support checksums when reading an entire object, not a range. - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && offset == 0 && length == 0 { - r.wantCRC = checksums.GetCrc32C() - r.checkCRC = true - } - - // Store the content from the first Recv in the client buffer for reading - // later. 
- r.leftovers = msg.GetChecksummedData().GetContent() - - return r, nil -} - -func (r *Reader) readWithRetry(p []byte) (int, error) { - n := 0 - for len(p[n:]) > 0 { - m, err := r.body.Read(p[n:]) - n += m - r.seen += int64(m) - if err == nil || err == io.EOF { - return n, err - } - // Read failed (likely due to connection issues), but we will try to reopen - // the pipe and continue. Send a ranged read request that takes into account - // the number of bytes we've already seen. - res, err := r.reopen(r.seen) - if err != nil { - // reopen already retries - return n, err - } - r.body.Close() - r.body = res.Body - } - return n, nil -} - -// closeStream cancels a stream's context in order for it to be closed and -// collected. -// -// This is an experimental API and not intended for public use. -func (r *Reader) closeStream() { - if r.cancelStream != nil { - r.cancelStream() - } - r.stream = nil -} - -// readWithGRPC reads bytes into the user's buffer from an open gRPC stream. -// -// This is an experimental API and not intended for public use. -func (r *Reader) readWithGRPC(p []byte) (int, error) { - // No stream to read from, either never initiliazed or Close was called. - // Note: There is a potential concurrency issue if multiple routines are - // using the same reader. One encounters an error and the stream is closed - // and then reopened while the other routine attempts to read from it. - if r.stream == nil { - return 0, fmt.Errorf("reader has been closed") - } - - // The entire object has been read by this reader, return EOF. - if r.size != 0 && r.size == r.seen { - return 0, io.EOF - } - - var n int - // Read leftovers and return what was available to conform to the Reader - // interface: https://pkg.go.dev/io#Reader. - if len(r.leftovers) > 0 { - n = copy(p, r.leftovers) - r.seen += int64(n) - r.leftovers = r.leftovers[n:] - return n, nil - } - - // Attempt to Recv the next message on the stream. - msg, err := r.recv() - if err != nil { - return 0, err - } - - // TODO: Determine if we need to capture incremental CRC32C for this - // chunk. The Object CRC32C checksum is captured when directed to read - // the entire Object. If directed to read a range, we may need to - // calculate the range's checksum for verification if the checksum is - // present in the response here. - // TODO: Figure out if we need to support decompressive transcoding - // https://cloud.google.com/storage/docs/transcoding. - content := msg.GetChecksummedData().GetContent() - n = copy(p[n:], content) - leftover := len(content) - n - if leftover > 0 { - // Wasn't able to copy all of the data in the message, store for - // future Read calls. - r.leftovers = content[n:] - } - r.seen += int64(n) - - return n, nil -} - -// recv attempts to Recv the next message on the stream. In the event -// that a retryable error is encountered, the stream will be closed, reopened, -// and Recv again. This will attempt to Recv until one of the following is true: -// -// * Recv is successful -// * A non-retryable error is encountered -// * The Reader's context is canceled -// -// The last error received is the one that is returned, which could be from -// an attempt to reopen the stream. -// -// This is an experimental API and not intended for public use. -func (r *Reader) recv() (*storagepb.ReadObjectResponse, error) { - msg, err := r.stream.Recv() - if err != nil && shouldRetry(err) { - // This will "close" the existing stream and immediately attempt to - // reopen the stream, but will backoff if further attempts are necessary. 
- // Reopening the stream Recvs the first message, so if retrying is - // successful, the next logical chunk will be returned. - msg, err = r.reopenStream(r.seen) - } - - return msg, err -} - -// reopenStream "closes" the existing stream and attempts to reopen a stream and -// sets the Reader's stream and cancelStream properties in the process. -// -// This is an experimental API and not intended for public use. -func (r *Reader) reopenStream(seen int64) (*storagepb.ReadObjectResponse, error) { - // Close existing stream and initialize new stream with updated offset. - r.closeStream() - - res, cancel, err := r.reopenWithGRPC(r.seen) - if err != nil { - return nil, err - } - r.stream = res.stream - r.cancelStream = cancel - return res.response, nil -} - // Size returns the size of the object in bytes. // The returned value is always the same and is not affected by // calls to Read or Close. diff --git a/vendor/cloud.google.com/go/storage/release-please-config.json b/vendor/cloud.google.com/go/storage/release-please-config.json index 86e8a5759e9..5b7b812a947 100644 --- a/vendor/cloud.google.com/go/storage/release-please-config.json +++ b/vendor/cloud.google.com/go/storage/release-please-config.json @@ -7,5 +7,6 @@ "storage": { "component": "storage" } - } + }, + "plugins": ["sentence-case"] } diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 719f39723bc..d7e06fca710 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -33,6 +33,7 @@ import ( "reflect" "regexp" "sort" + "strconv" "strings" "time" "unicode/utf8" @@ -40,7 +41,7 @@ import ( "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" "cloud.google.com/go/storage/internal" - gapic "cloud.google.com/go/storage/internal/apiv2" + storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" "github.com/googleapis/gax-go/v2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" @@ -49,7 +50,6 @@ import ( raw "google.golang.org/api/storage/v1" "google.golang.org/api/transport" htransport "google.golang.org/api/transport/http" - storagepb "google.golang.org/genproto/googleapis/storage/v2" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/known/timestamppb" @@ -84,6 +84,14 @@ const ( // ScopeReadWrite grants permissions to manage your // data in Google Cloud Storage. ScopeReadWrite = raw.DevstorageReadWriteScope + + // aes256Algorithm is the AES256 encryption algorithm used with the + // Customer-Supplied Encryption Keys feature. + aes256Algorithm = "AES256" + + // defaultGen indicates the latest object generation by default, + // using a negative value. + defaultGen = int64(-1) ) // TODO: remove this once header with invocation ID is applied to all methods. @@ -106,10 +114,12 @@ type Client struct { creds *google.Credentials retry *retryConfig - // gc is an optional gRPC-based, GAPIC client. - // - // This is an experimental field and not intended for public use. - gc *gapic.Client + // tc is the transport-agnostic client implemented with either gRPC or HTTP. + tc storageClient + // useGRPC flags whether the client uses gRPC. This is needed while the + // integration piece is only partially complete. + // TODO: remove before merging to main. + useGRPC bool } // NewClient creates a new Google Cloud Storage client. 
@@ -190,12 +200,18 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err) } + tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) + if err != nil { + return nil, fmt.Errorf("storage: %v", err) + } + return &Client{ hc: hc, raw: rawService, scheme: u.Scheme, readHost: u.Host, creds: creds, + tc: tc, }, nil } @@ -205,12 +221,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // This is an experimental API and not intended for public use. func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { opts = append(defaultGRPCOptions(), opts...) - g, err := gapic.NewClient(ctx, opts...) + tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) if err != nil { return nil, err } - return &Client{gc: g}, nil + return &Client{tc: tc, useGRPC: true}, nil } // Close closes the Client. @@ -221,8 +237,8 @@ func (c *Client) Close() error { c.hc = nil c.raw = nil c.creds = nil - if c.gc != nil { - return c.gc.Close() + if c.tc != nil { + return c.tc.Close() } return nil } @@ -504,13 +520,13 @@ func v2SanitizeHeaders(hdrs []string) []string { // at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. // // V4 does a couple things differently from V2: -// - Headers get sorted by key, instead of by key:value. We do this in -// signedURLV4. -// - There's no canonical regexp: we simply split headers on :. -// - We don't exclude canonical headers. -// - We replace leading and trailing spaces in header values, like v2, but also -// all intermediate space duplicates get stripped. That is, there's only ever -// a single consecutive space. +// - Headers get sorted by key, instead of by key:value. We do this in +// signedURLV4. +// - There's no canonical regexp: we simply split headers on :. +// - We don't exclude canonical headers. +// - We replace leading and trailing spaces in header values, like v2, but also +// all intermediate space duplicates get stripped. That is, there's only ever +// a single consecutive space. func v4SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -902,27 +918,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error if err := o.validate(); err != nil { return nil, err } - call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) - if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true, setRetryHeaderHTTP(call)) - var e *googleapi.Error - if errors.As(err, &e) && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil + opts := makeStorageOpts(true, o.retry, o.userProject) + return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) } // Update updates an object with the provided attributes. 
See @@ -935,99 +932,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( if err := o.validate(); err != nil { return nil, err } - var attrs ObjectAttrs - // Lists of fields to send, and set to null, in the JSON. - var forceSendFields, nullFields []string - if uattrs.ContentType != nil { - attrs.ContentType = optional.ToString(uattrs.ContentType) - // For ContentType, sending the empty string is a no-op. - // Instead we send a null. - if attrs.ContentType == "" { - nullFields = append(nullFields, "ContentType") - } else { - forceSendFields = append(forceSendFields, "ContentType") - } - } - if uattrs.ContentLanguage != nil { - attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - // For ContentLanguage it's an error to send the empty string. - // Instead we send a null. - if attrs.ContentLanguage == "" { - nullFields = append(nullFields, "ContentLanguage") - } else { - forceSendFields = append(forceSendFields, "ContentLanguage") - } - } - if uattrs.ContentEncoding != nil { - attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentEncoding") - } - if uattrs.ContentDisposition != nil { - attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - forceSendFields = append(forceSendFields, "ContentDisposition") - } - if uattrs.CacheControl != nil { - attrs.CacheControl = optional.ToString(uattrs.CacheControl) - forceSendFields = append(forceSendFields, "CacheControl") - } - if uattrs.EventBasedHold != nil { - attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) - forceSendFields = append(forceSendFields, "EventBasedHold") - } - if uattrs.TemporaryHold != nil { - attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - forceSendFields = append(forceSendFields, "TemporaryHold") - } - if !uattrs.CustomTime.IsZero() { - attrs.CustomTime = uattrs.CustomTime - forceSendFields = append(forceSendFields, "CustomTime") - } - if uattrs.Metadata != nil { - attrs.Metadata = uattrs.Metadata - if len(attrs.Metadata) == 0 { - // Sending the empty map is a no-op. We send null instead. - nullFields = append(nullFields, "Metadata") - } else { - forceSendFields = append(forceSendFields, "Metadata") - } - } - if uattrs.ACL != nil { - attrs.ACL = uattrs.ACL - // It's an error to attempt to delete the ACL, so - // we don't append to nullFields here. 
- forceSendFields = append(forceSendFields, "Acl") - } - rawObj := attrs.toRawObject(o.bucket) - rawObj.ForceSendFields = forceSendFields - rawObj.NullFields = nullFields - call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) - if err := applyConds("Update", o.gen, o.conds, call); err != nil { - return nil, err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - if uattrs.PredefinedACL != "" { - call.PredefinedAcl(uattrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - var isIdempotent bool - if o.conds != nil && o.conds.MetagenerationMatch != 0 { - isIdempotent = true - } - err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent, setRetryHeaderHTTP(call)) - var e *googleapi.Error - if errors.As(err, &e) && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil + isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...) } // BucketName returns the name of the bucket. @@ -1047,11 +954,12 @@ func (o *ObjectHandle) ObjectName() string { // // For example, to change ContentType and delete ContentEncoding and // Metadata, use -// ObjectAttrsToUpdate{ -// ContentType: "text/html", -// ContentEncoding: "", -// Metadata: map[string]string{}, -// } +// +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } type ObjectAttrsToUpdate struct { EventBasedHold optional.Bool TemporaryHold optional.Bool @@ -1074,27 +982,11 @@ func (o *ObjectHandle) Delete(ctx context.Context) error { if err := o.validate(); err != nil { return err } - call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) - if err := applyConds("Delete", o.gen, o.conds, call); err != nil { - return err - } - if o.userProject != "" { - call.UserProject(o.userProject) - } - // Encryption doesn't apply to Delete. - setClientHeader(call.Header()) - var isIdempotent bool // Delete is idempotent if GenerationMatch or Generation have been passed in. // The default generation is negative to get the latest version of the object. - if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 { - isIdempotent = true - } - err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent, setRetryHeaderHTTP(call)) - var e *googleapi.Error - if errors.As(err, &e) && e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - return err + isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 + opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) + return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...) } // ReadCompressed when true causes the read to happen without decompressing. @@ -1203,8 +1095,11 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { } // For now, there are only globally unique buckets, and "_" is the alias - // project ID for such buckets. - b = bucketResourceName("_", b) + // project ID for such buckets. If the bucket is not provided, like in the + // destination ObjectAttrs of a Copy, do not attempt to format it. 
+ if b != "" { + b = bucketResourceName(globalProjectAlias, b) + } return &storagepb.Object{ Bucket: b, @@ -1231,6 +1126,49 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { } } +// toProtoObject copies the attributes to update from uattrs to the proto library's Object type. +func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object { + o := &storagepb.Object{ + Name: object, + Bucket: bucket, + } + if uattrs == nil { + return o + } + + if uattrs.EventBasedHold != nil { + o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold)) + } + if uattrs.TemporaryHold != nil { + o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + } + if uattrs.ContentType != nil { + o.ContentType = optional.ToString(uattrs.ContentType) + } + if uattrs.ContentLanguage != nil { + o.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + } + if uattrs.ContentEncoding != nil { + o.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + } + if uattrs.ContentDisposition != nil { + o.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + } + if uattrs.CacheControl != nil { + o.CacheControl = optional.ToString(uattrs.CacheControl) + } + if !uattrs.CustomTime.IsZero() { + o.CustomTime = toProtoTimestamp(uattrs.CustomTime) + } + if uattrs.ACL != nil { + o.Acl = toProtoObjectACL(uattrs.ACL) + } + + // TODO(cathyo): Handle metadata. Pending b/230510191. + + return o +} + // ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. type ObjectAttrs struct { // Bucket is the name of the bucket containing this GCS object. @@ -1312,6 +1250,10 @@ type ObjectAttrs struct { // Metadata represents user-provided metadata, in key/value pairs. // It can be nil if no metadata is provided. + // + // For object downloads using Reader, metadata keys are sent as headers. + // Therefore, avoid setting metadata keys using characters that are not valid + // for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6. Metadata map[string]string // Generation is the generation number of the object's content. @@ -1786,6 +1728,33 @@ func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall return nil } +func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { + if gen >= 0 { + call.SourceGeneration = gen + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch = proto.Int64(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) + } + return nil +} + // setConditionField sets a field on a *raw.WhateverCall. // We can't use anonymous interfaces because the return type is // different, since the field setters are builders. @@ -1907,8 +1876,8 @@ func (ws *withPolicy) apply(config *retryConfig) { // WithErrorFunc allows users to pass a custom function to the retryer. Errors // will be retried if and only if `shouldRetry(err)` returns true. 
-// By default, the following errors are retried (see invoke.go for the default -// shouldRetry function): +// By default, the following errors are retried (see ShouldRetry for the default +// function): // // - HTTP responses with codes 408, 429, 502, 503, and 504. // @@ -1919,7 +1888,8 @@ func (ws *withPolicy) apply(config *retryConfig) { // - Wrapped versions of these errors. // // This option can be used to retry on a different set of errors than the -// default. +// default. Users can use the default ShouldRetry function inside their custom +// function if they only want to make minor modifications to default behavior. func WithErrorFunc(shouldRetry func(err error) bool) RetryOption { return &withErrorFunc{ shouldRetry: shouldRetry, @@ -1992,26 +1962,31 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro if copySource { cs = "copy-source-" } - headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm) headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) keyHash := sha256.Sum256(key) headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) return nil } +// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams. +func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams { + if key == nil { + return nil + } + keyHash := sha256.Sum256(key) + return &storagepb.CommonObjectRequestParams{ + EncryptionAlgorithm: aes256Algorithm, + EncryptionKeyBytes: key, + EncryptionKeySha256Bytes: keyHash[:], + } +} + // ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { - r := c.raw.Projects.ServiceAccount.Get(projectID) - var res *raw.ServiceAccount - var err error - err = run(ctx, func() error { - res, err = r.Context(ctx).Do() - return err - }, c.retry, true, setRetryHeaderHTTP(r)) - if err != nil { - return "", err - } - return res.EmailAddress, nil + o := makeStorageOpts(true, c.retry, "") + return c.tc.GetServiceAccount(ctx, projectID, o...) + } // bucketResourceName formats the given project ID and bucketResourceName ID @@ -2028,6 +2003,24 @@ func parseBucketName(b string) string { return b[sep+1:] } +// parseProjectNumber consume the given resource name and parses out the project +// number if one is present i.e. it is not a project ID. +func parseProjectNumber(r string) uint64 { + projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`) + if matches := projectID.FindStringSubmatch(r); len(matches) > 0 { + // Capture group follows the matched segment. For example: + // input: projects/123/bars/456 + // output: [projects/123/, 123] + number, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return 0 + } + return number + } + + return 0 +} + // toProjectResource accepts a project ID and formats it as a Project resource // name. 
func toProjectResource(project string) string { diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index c616603e4c6..91229f148a6 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -16,28 +16,12 @@ package storage import ( "context" - "encoding/base64" "errors" "fmt" "io" "sync" "time" "unicode/utf8" - - "github.com/golang/protobuf/proto" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" - storagepb "google.golang.org/genproto/googleapis/storage/v2" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - // Maximum amount of content that can be sent per WriteObjectRequest message. - // A buffer reaching this amount will precipitate a flush of the buffer. - // - // This is only used for the gRPC-based Writer. - maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) ) // A Writer writes a Cloud Storage object. @@ -123,112 +107,6 @@ type Writer struct { mu sync.Mutex err error - - // The gRPC client-stream used for sending buffers. - // - // This is an experimental API and not intended for public use. - stream storagepb.Storage_WriteObjectClient - - // The Resumable Upload ID started by a gRPC-based Writer. - // - // This is an experimental API and not intended for public use. - upid string -} - -func (w *Writer) open() error { - if err := w.validateWriteAttrs(); err != nil { - return err - } - - pr, pw := io.Pipe() - w.pw = pw - w.opened = true - - go w.monitorCancel() - - attrs := w.ObjectAttrs - mediaOpts := []googleapi.MediaOption{ - googleapi.ChunkSize(w.ChunkSize), - } - if c := attrs.ContentType; c != "" { - mediaOpts = append(mediaOpts, googleapi.ContentType(c)) - } - if w.ChunkRetryDeadline != 0 { - mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(w.ChunkRetryDeadline)) - } - - go func() { - defer close(w.donec) - - rawObj := attrs.toRawObject(w.o.bucket) - if w.SendCRC32C { - rawObj.Crc32c = encodeUint32(attrs.CRC32C) - } - if w.MD5 != nil { - rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) - } - call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). - Media(pr, mediaOpts...). - Projection("full"). - Context(w.ctx). - Name(w.o.object) - - if w.ProgressFunc != nil { - call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) - } - if attrs.KMSKeyName != "" { - call.KmsKeyName(attrs.KMSKeyName) - } - if attrs.PredefinedACL != "" { - call.PredefinedAcl(attrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { - w.mu.Lock() - w.err = err - w.mu.Unlock() - pr.CloseWithError(err) - return - } - var resp *raw.Object - err := applyConds("NewWriter", w.o.gen, w.o.conds, call) - if err == nil { - if w.o.userProject != "" { - call.UserProject(w.o.userProject) - } - setClientHeader(call.Header()) - - // The internals that perform call.Do automatically retry both the initial - // call to set up the upload as well as calls to upload individual chunks - // for a resumable upload (as long as the chunk size is non-zero). Hence - // there is no need to add retries here. - - // Retry only when the operation is idempotent or the retry policy is RetryAlways. 
- isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) - var useRetry bool - if (w.o.retry == nil || w.o.retry.policy == RetryIdempotent) && isIdempotent { - useRetry = true - } else if w.o.retry != nil && w.o.retry.policy == RetryAlways { - useRetry = true - } - if useRetry { - if w.o.retry != nil { - call.WithRetry(w.o.retry.backoff, w.o.retry.shouldRetry) - } else { - call.WithRetry(nil, nil) - } - } - resp, err = call.Do() - } - if err != nil { - w.mu.Lock() - w.err = err - w.mu.Unlock() - pr.CloseWithError(err) - return - } - w.obj = newObject(resp) - }() - return nil } // Write appends to w. It implements the io.Writer interface. @@ -248,12 +126,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { return 0, werr } if !w.opened { - // gRPC client has been initialized - use gRPC to upload. - if w.o.c.gc != nil { - if err := w.openGRPC(); err != nil { - return 0, err - } - } else if err := w.open(); err != nil { + if err := w.openWriter(); err != nil { return 0, err } } @@ -277,7 +150,7 @@ func (w *Writer) Write(p []byte) (n int, err error) { // can be retrieved by calling Attrs. func (w *Writer) Close() error { if !w.opened { - if err := w.open(); err != nil { + if err := w.openWriter(); err != nil { return err } } @@ -293,6 +166,40 @@ func (w *Writer) Close() error { return w.err } +func (w *Writer) openWriter() (err error) { + if err := w.validateWriteAttrs(); err != nil { + return err + } + if w.o.gen != defaultGen { + return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen) + } + + isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) + opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) + go w.monitorCancel() + params := &openWriterParams{ + ctx: w.ctx, + chunkSize: w.ChunkSize, + chunkRetryDeadline: w.ChunkRetryDeadline, + bucket: w.o.bucket, + attrs: &w.ObjectAttrs, + conds: w.o.conds, + encryptionKey: w.o.encryptionKey, + sendCRC32C: w.SendCRC32C, + donec: w.donec, + setError: w.error, + progress: w.progress, + setObj: func(o *ObjectAttrs) { w.obj = o }, + } + w.pw, err = w.o.c.tc.OpenWriter(params, opts...) + if err != nil { + return err + } + w.opened = true + + return nil +} + // monitorCancel is intended to be used as a background goroutine. It monitors the // context, and when it observes that the context has been canceled, it manually // closes things that do not take a context. @@ -361,333 +268,3 @@ func (w *Writer) error(err error) { w.err = err w.mu.Unlock() } - -// openGRPC initializes a pipe for the user to write data to, and a routine to -// read from that pipe and upload the data to GCS via gRPC. -// -// This is an experimental API and not intended for public use. -func (w *Writer) openGRPC() error { - if err := w.validateWriteAttrs(); err != nil { - return err - } - - pr, pw := io.Pipe() - w.pw = pw - w.opened = true - - go w.monitorCancel() - - bufSize := w.ChunkSize - if w.ChunkSize == 0 { - // TODO: Should we actually use the minimum of 256 KB here when the user - // indicates they want minimal memory usage? We cannot do a zero-copy, - // bufferless upload like HTTP/JSON can. - // TODO: We need to determine if we can avoid starting a - // resumable upload when the user *plans* to send more than bufSize but - // with a bufferless upload. 
- bufSize = maxPerMessageWriteSize - } - buf := make([]byte, bufSize) - - var offset int64 - - // This function reads the data sent to the pipe and sends sets of messages - // on the gRPC client-stream as the buffer is filled. - go func() { - defer close(w.donec) - - // Loop until there is an error or the Object has been finalized. - for { - // Note: This blocks until either the buffer is full or EOF is read. - recvd, doneReading, err := read(pr, buf) - if err != nil { - err = checkCanceled(err) - w.error(err) - pr.CloseWithError(err) - return - } - toWrite := buf[:recvd] - - // TODO: Figure out how to set up encryption via CommonObjectRequestParams. - - // The chunk buffer is full, but there is no end in sight. This - // means that a resumable upload will need to be used to send - // multiple chunks, until we are done reading data. Start a - // resumable upload if it has not already been started. - // Otherwise, all data will be sent over a single gRPC stream. - if !doneReading && w.upid == "" { - err = w.startResumableUpload() - if err != nil { - err = checkCanceled(err) - w.error(err) - pr.CloseWithError(err) - return - } - } - - o, off, finalized, err := w.uploadBuffer(toWrite, recvd, offset, doneReading) - if err != nil { - err = checkCanceled(err) - w.error(err) - pr.CloseWithError(err) - return - } - // At this point, the current buffer has been uploaded. Capture the - // committed offset here in case the upload was not finalized and - // another chunk is to be uploaded. - offset = off - w.progress(offset) - - // When we are done reading data and the chunk has been finalized, - // we are done. - if doneReading && finalized { - // Build Object from server's response. - w.obj = newObjectFromProto(o) - return - } - } - }() - - return nil -} - -// startResumableUpload initializes a Resumable Upload with gRPC and sets the -// upload ID on the Writer. -// -// This is an experimental API and not intended for public use. -func (w *Writer) startResumableUpload() error { - spec, err := w.writeObjectSpec() - if err != nil { - return err - } - upres, err := w.o.c.gc.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{ - WriteObjectSpec: spec, - }) - - w.upid = upres.GetUploadId() - return err -} - -// queryProgress is a helper that queries the status of the resumable upload -// associated with the given upload ID. -// -// This is an experimental API and not intended for public use. -func (w *Writer) queryProgress() (int64, error) { - q, err := w.o.c.gc.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid}) - - // q.GetCommittedSize() will return 0 if q is nil. - return q.GetPersistedSize(), err -} - -// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if -// uploading a chunk for a resumable uploadBuffer), and will mark the write as -// finished if we are done receiving data from the user. The resulting write -// offset after uploading the buffer is returned, as well as a boolean -// indicating if the Object has been finalized. If it has been finalized, the -// final Object will be returned as well. Finalizing the upload is primarily -// important for Resumable Uploads. A simple or multi-part upload will always -// be finalized once the entire buffer has been written. -// -// This is an experimental API and not intended for public use. 
-func (w *Writer) uploadBuffer(buf []byte, recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { - var err error - var finishWrite bool - var sent, limit int = 0, maxPerMessageWriteSize - offset := start - for { - first := sent == 0 - // This indicates that this is the last message and the remaining - // data fits in one message. - belowLimit := recvd-sent <= limit - if belowLimit { - limit = recvd - sent - } - if belowLimit && doneReading { - finishWrite = true - } - - // Prepare chunk section for upload. - data := buf[sent : sent+limit] - req := &storagepb.WriteObjectRequest{ - Data: &storagepb.WriteObjectRequest_ChecksummedData{ - ChecksummedData: &storagepb.ChecksummedData{ - Content: data, - }, - }, - WriteOffset: offset, - FinishWrite: finishWrite, - } - - // Open a new stream and set the first_message field on the request. - // The first message on the WriteObject stream must either be the - // Object or the Resumable Upload ID. - if first { - w.stream, err = w.o.c.gc.WriteObject(w.ctx) - if err != nil { - return nil, 0, false, err - } - - if w.upid != "" { - req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} - } else { - spec, err := w.writeObjectSpec() - if err != nil { - return nil, 0, false, err - } - req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ - WriteObjectSpec: spec, - } - } - - // TODO: Currently the checksums are only sent on the first message - // of the stream, but in the future, we must also support sending it - // on the *last* message of the stream (instead of the first). - if w.SendCRC32C { - req.ObjectChecksums = &storagepb.ObjectChecksums{ - Crc32C: proto.Uint32(w.CRC32C), - Md5Hash: w.MD5, - } - } - } - - err = w.stream.Send(req) - if err == io.EOF { - // err was io.EOF. The client-side of a stream only gets an EOF on Send - // when the backend closes the stream and wants to return an error - // status. Closing the stream receives the status as an error. - _, err = w.stream.CloseAndRecv() - - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. - if shouldRetry(err) { - sent = 0 - finishWrite = false - // TODO: Add test case for failure modes of querying progress. - offset, err = w.determineOffset(start) - if err == nil { - continue - } - } - } - if err != nil { - return nil, 0, false, err - } - - // Update the immediate stream's sent total and the upload offset with - // the data sent. - sent += len(data) - offset += int64(len(data)) - - // Not done sending data, do not attempt to commit it yet, loop around - // and send more data. - if recvd-sent > 0 { - continue - } - - // Done sending data. Close the stream to "commit" the data sent. - resp, finalized, err := w.commit() - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. - if shouldRetry(err) { - sent = 0 - finishWrite = false - offset, err = w.determineOffset(start) - if err == nil { - continue - } - } - if err != nil { - return nil, 0, false, err - } - - return resp.GetResource(), offset, finalized, nil - } -} - -// determineOffset either returns the offset given to it in the case of a simple -// upload, or queries the write status in the case a resumable upload is being -// used. 
-// -// This is an experimental API and not intended for public use. -func (w *Writer) determineOffset(offset int64) (int64, error) { - // For a Resumable Upload, we must start from however much data - // was committed. - if w.upid != "" { - committed, err := w.queryProgress() - if err != nil { - return 0, err - } - offset = committed - } - return offset, nil -} - -// commit closes the stream to commit the data sent and potentially receive -// the finalized object if finished uploading. If the last request sent -// indicated that writing was finished, the Object will be finalized and -// returned. If not, then the Object will be nil, and the boolean returned will -// be false. -// -// This is an experimental API and not intended for public use. -func (w *Writer) commit() (*storagepb.WriteObjectResponse, bool, error) { - finalized := true - resp, err := w.stream.CloseAndRecv() - if err == io.EOF { - // Closing a stream for a resumable upload finish_write = false results - // in an EOF which can be ignored, as we aren't done uploading yet. - finalized = false - err = nil - } - // Drop the stream reference as it has been closed. - w.stream = nil - - return resp, finalized, err -} - -// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's -// ObjectAttrs and applies its Conditions. This is only used for gRPC. -// -// This is an experimental API and not intended for public use. -func (w *Writer) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { - spec := &storagepb.WriteObjectSpec{ - Resource: w.ObjectAttrs.toProtoObject(w.o.bucket), - } - // WriteObject doesn't support the generation condition, so use -1. - if err := applyCondsProto("WriteObject", -1, w.o.conds, spec); err != nil { - return nil, err - } - return spec, nil -} - -// read copies the data in the reader to the given buffer and reports how much -// data was read into the buffer and if there is no more data to read (EOF). -// -// This is an experimental API and not intended for public use. -func read(r io.Reader, buf []byte) (int, bool, error) { - // Set n to -1 to start the Read loop. - var n, recvd int = -1, 0 - var err error - for err == nil && n != 0 { - // The routine blocks here until data is received. - n, err = r.Read(buf[recvd:]) - recvd += n - } - var done bool - if err == io.EOF { - done = true - err = nil - } - return recvd, done, err -} - -func checkCanceled(err error) error { - if status.Code(err) == codes.Canceled { - return context.Canceled - } - - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 8d65ca1d64d..cad3b9a4883 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -31,12 +31,12 @@ func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { // allow you to get a list of the partitions in the order the endpoints // will be resolved in. // -// resolver, err := endpoints.DecodeModel(reader) +// resolver, err := endpoints.DecodeModel(reader) // -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { var opts DecodeModelOptions opts.Set(optFns...) 
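// Illustrative sketch only, not part of the upstream change: the defaults.go
// update below adds the me-central-1 (Middle East (UAE)) region, exposed as
// endpoints.MeCentral1RegionID. Resolving an endpoint for it uses the same
// endpoints API that the DecodeModel docs above describe; the service name
// "s3" is just an example, and per-service availability follows the endpoint
// tables added below.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	p := endpoints.AwsPartition()
	ep, err := p.EndpointFor("s3", endpoints.MeCentral1RegionID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ep.URL)
}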
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 0efbd5d8229..1ec4a1ee867 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -33,6 +33,7 @@ const ( EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeCentral1RegionID = "me-central-1" // Middle East (UAE). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). @@ -186,6 +187,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "me-central-1": region{ + Description: "Middle East (UAE)", + }, "me-south-1": region{ Description: "Middle East (Bahrain)", }, @@ -314,6 +318,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -437,6 +444,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1298,6 +1308,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "api.ecr.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -1677,6 +1695,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -1920,107 +1941,6 @@ var awsPartition = partition{ }, }, "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "app-integrations": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ 
- Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -2052,6 +1972,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2071,71 +1997,52 @@ var awsPartition = partition{ Region: "eu-west-3", }: endpoint{}, endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ - Region: "eu-north-1", - }: endpoint{}, + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ - Region: "eu-south-1", - }: endpoint{}, + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ - Region: "eu-west-1", - }: endpoint{}, + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ - Region: "eu-west-2", - }: endpoint{}, + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ - Region: "eu-west-3", + Region: "me-central-1", }: endpoint{}, endpointKey{ Region: "me-south-1", @@ -2146,18 +2053,42 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + }, }, }, - "appflow": service{ + "app-integrations": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -2168,9 +2099,6 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2183,38 +2111,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, }, }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, + "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -2264,6 +2172,206 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2443,6 +2551,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2524,6 +2653,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.us-east-1.api.aws", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{}, @@ -2533,6 +2683,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.us-east-2.api.aws", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: 
"appmesh-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{}, @@ -2542,6 +2713,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.us-west-1.api.aws", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -2551,6 +2743,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "appmesh.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "apprunner": service{ @@ -2833,6 +3046,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3029,6 +3245,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3261,6 +3480,22 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -3443,6 +3678,94 @@ var awsPartition = partition{ }, }, }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", + }, + }, + }, "catalog.marketplace": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -3777,6 +4100,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3912,6 +4238,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4073,6 +4402,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4491,6 +4823,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4833,6 +5168,64 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "codestar-notifications": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "cognito-identity": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -5503,6 +5896,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -5589,12 +5985,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", + }, }, }, "contact-lens": service{ @@ -5625,6 +6051,115 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "cur": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -5981,21 +6516,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: 
endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + }, }, }, "dataexchange": service{ @@ -6259,12 +6854,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -6274,6 +6878,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -6301,6 +6911,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -6319,6 +6932,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "devops-guru-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -6416,6 +7032,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6632,6 +7251,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6854,6 +7476,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6921,6 +7546,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7116,6 +7744,9 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7459,6 +8090,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7613,6 +8247,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7879,6 +8516,9 @@ var awsPartition = partition{ }, 
Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8580,6 +9220,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8733,6 +9376,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9034,9 +9680,51 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -9046,6 +9734,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -9055,6 +9761,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -9064,6 +9773,24 @@ var awsPartition = partition{ }: endpoint{ Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -9148,6 +9875,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -9314,6 +10044,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10114,24 +10847,9 @@ var awsPartition = partition{ }, "fsx": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: 
endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10150,21 +10868,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -10201,15 +10910,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "fips-prod-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-prod-us-west-2", }: endpoint{ @@ -10237,15 +10937,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -10255,9 +10946,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, endpointKey{ Region: "prod-ca-central-1", }: endpoint{ @@ -10312,24 +11000,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "prod-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "prod-us-west-2", }: endpoint{ @@ -10348,9 +11018,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -10369,15 +11036,6 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.us-east-2.amazonaws.com", }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -10670,6 +11328,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11686,6 +12347,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11695,15 +12362,69 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + }, }, }, "ioteventsdata": service{ @@ -11756,6 +12477,15 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -11780,6 +12510,42 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -11788,6 +12554,15 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, endpointKey{ Region: "us-east-2", }: endpoint{ @@ -11796,6 +12571,15 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -11804,6 +12588,25 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotfleetwise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: 
endpoint{}, }, }, "iotsecuredtunneling": service{ @@ -12166,6 +12969,18 @@ var awsPartition = partition{ }, "ivschat": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12203,6 +13018,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12452,6 +13270,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12522,6 +13343,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12905,6 +13729,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13324,6 +14166,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13793,6 +14638,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14770,6 +15618,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14841,6 +15692,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14882,6 +15736,31 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "migrationhub-orchestrator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "migrationhub-strategy": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15115,6 +15994,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16041,6 +16923,70 @@ var awsPartition = partition{ }, }, }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + }, + }, + }, "personalize": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -16128,6 +17074,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16907,9 +17856,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -16928,6 +17886,54 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16937,15 +17943,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + }, }, }, "rds": service{ @@ -17013,6 +18043,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17231,6 +18264,112 @@ var awsPartition = partition{ }, }, }, + "rds-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + }, + }, + }, "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -17332,6 +18471,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17887,6 +19029,9 @@ var awsPartition = partition{ }, "rolesanywhere": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -17908,6 +19053,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17917,6 +19065,9 @@ var awsPartition = partition{ endpointKey{ 
Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -18250,6 +19401,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18603,6 +19757,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19524,6 +20687,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20695,6 +21861,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -21028,6 +22197,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21164,6 +22336,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21312,6 +22487,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21555,6 +22733,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21820,6 +23001,9 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21899,6 +23083,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21992,6 +23179,19 @@ var awsPartition = partition{ }, }, }, + "supportapp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "swf": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -22078,6 +23278,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22208,6 +23411,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22302,6 +23508,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22993,12 +24202,32 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, 
endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "waf": service{ @@ -24364,6 +25593,9 @@ var awsPartition = partition{ }, "workspaces": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -24446,9 +25678,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -24543,6 +25784,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -24880,6 +26124,16 @@ var awscnPartition = partition{ }, }, }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "ce": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -25720,6 +26974,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -27012,6 +28276,26 @@ var awsusgovPartition = partition{ }, }, }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "cloudcontrolapi": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28805,13 +30089,37 @@ var awsusgovPartition = partition{ }, "iotevents": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + }, }, }, "ioteventsdata": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -28820,6 +30128,15 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "iotsecuredtunneling": service{ @@ -29066,6 +30383,12 @@ var awsusgovPartition = partition{ 
endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -29075,6 +30398,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -29163,6 +30492,13 @@ var awsusgovPartition = partition{ }, }, }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "mediaconvert": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29503,6 +30839,18 @@ var awsusgovPartition = partition{ }, }, }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "pinpoint": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -29612,6 +30960,46 @@ var awsusgovPartition = partition{ }, }, }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "rds": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -31239,6 +32627,16 @@ var awsusgovPartition = partition{ }, }, }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "workspaces": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31389,6 +32787,9 @@ var awsisoPartition = partition{ }, "appconfigdata": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, @@ -32144,6 +33545,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -32463,6 +33871,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32491,6 +33913,13 @@ var 
awsisobPartition = partition{ }: endpoint{}, }, }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-iso-b-global", IsRegionalized: boxedFalse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index 84316b92c05..66dec6bebf0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -9,7 +9,7 @@ // AWS GovCloud (US) (aws-us-gov). // . // -// Enumerating Regions and Endpoint Metadata +// # Enumerating Regions and Endpoint Metadata // // Casting the Resolver returned by DefaultResolver to a EnumPartitions interface // will allow you to get access to the list of underlying Partitions with the @@ -17,22 +17,22 @@ // resolving to a single partition, or enumerate regions, services, and endpoints // in the partition. // -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() // -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } // -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } // -// Using Custom Endpoints +// # Using Custom Endpoints // // The endpoints package also gives you the ability to use your own logic how // endpoints are resolved. This is a great way to define a custom endpoint @@ -47,20 +47,19 @@ // of Resolver.EndpointFor, converting it to a type that satisfies the // Resolver interface. // +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } // -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } // -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) 
-// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 880986157db..a686a48fa27 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -353,10 +353,12 @@ type EnumPartitions interface { // as the second parameter. // // This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) // // This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +// +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { for _, p := range ps { if p.ID() != partitionID { @@ -423,8 +425,8 @@ func (p Partition) ID() string { return p.id } // of new regions and services expansions. // // Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError +// - UnknownServiceError +// - UnknownEndpointError func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return p.p.EndpointFor(service, region, opts...) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index a1e5a94a5d5..c09688d1c04 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.72" +const SDKVersion = "1.44.109" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 831b0110c54..2c0cbba909b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -3,6 +3,7 @@ package query import ( "encoding/xml" "fmt" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -62,7 +63,7 @@ func UnmarshalError(r *request.Request) { } r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), + awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil), r.HTTPResponse.StatusCode, reqID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index a64a692d5b8..273b2da221d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -29,14 +29,13 @@ const opAddPermission = "AddPermission" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // +// // Example sending a request using the AddPermissionRequest method. +// req, resp := client.AddPermissionRequest(params) // -// // Example sending a request using the AddPermissionRequest method. -// req, resp := client.AddPermissionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/AddPermission func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { @@ -69,17 +68,18 @@ func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // API operation AddPermission for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/AddPermission func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) { @@ -119,14 +119,13 @@ const opCheckIfPhoneNumberIsOptedOut = "CheckIfPhoneNumberIsOptedOut" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CheckIfPhoneNumberIsOptedOutRequest method. +// req, resp := client.CheckIfPhoneNumberIsOptedOutRequest(params) // -// // Example sending a request using the CheckIfPhoneNumberIsOptedOutRequest method. -// req, resp := client.CheckIfPhoneNumberIsOptedOutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CheckIfPhoneNumberIsOptedOut func (c *SNS) CheckIfPhoneNumberIsOptedOutRequest(input *CheckIfPhoneNumberIsOptedOutInput) (req *request.Request, output *CheckIfPhoneNumberIsOptedOutOutput) { @@ -162,18 +161,19 @@ func (c *SNS) CheckIfPhoneNumberIsOptedOutRequest(input *CheckIfPhoneNumberIsOpt // API operation CheckIfPhoneNumberIsOptedOut for usage and error information. // // Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. 
+// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CheckIfPhoneNumberIsOptedOut func (c *SNS) CheckIfPhoneNumberIsOptedOut(input *CheckIfPhoneNumberIsOptedOutInput) (*CheckIfPhoneNumberIsOptedOutOutput, error) { @@ -213,14 +213,13 @@ const opConfirmSubscription = "ConfirmSubscription" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ConfirmSubscriptionRequest method. +// req, resp := client.ConfirmSubscriptionRequest(params) // -// // Example sending a request using the ConfirmSubscriptionRequest method. -// req, resp := client.ConfirmSubscriptionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ConfirmSubscription func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) { @@ -255,25 +254,26 @@ func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req * // API operation ConfirmSubscription for usage and error information. // // Returned Error Codes: -// * ErrCodeSubscriptionLimitExceededException "SubscriptionLimitExceeded" -// Indicates that the customer already owns the maximum allowed number of subscriptions. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeSubscriptionLimitExceededException "SubscriptionLimitExceeded" +// Indicates that the customer already owns the maximum allowed number of subscriptions. +// +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. 
// -// * ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" -// Indicates that the number of filter polices in your Amazon Web Services account -// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit -// Increase case in the Amazon Web Services Support Center. +// - ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" +// Indicates that the number of filter polices in your Amazon Web Services account +// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit +// Increase case in the Amazon Web Services Support Center. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ConfirmSubscription func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubscriptionOutput, error) { @@ -313,14 +313,13 @@ const opCreatePlatformApplication = "CreatePlatformApplication" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreatePlatformApplicationRequest method. +// req, resp := client.CreatePlatformApplicationRequest(params) // -// // Example sending a request using the CreatePlatformApplicationRequest method. -// req, resp := client.CreatePlatformApplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformApplication func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) { @@ -349,26 +348,26 @@ func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationI // PlatformPrincipal and PlatformCredential are received from the notification // service. // -// * For ADM, PlatformPrincipal is client id and PlatformCredential is client -// secret. +// - For ADM, PlatformPrincipal is client id and PlatformCredential is client +// secret. // -// * For Baidu, PlatformPrincipal is API key and PlatformCredential is secret -// key. +// - For Baidu, PlatformPrincipal is API key and PlatformCredential is secret +// key. // -// * For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal -// is SSL certificate and PlatformCredential is private key. +// - For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal +// is SSL certificate and PlatformCredential is private key. // -// * For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal -// is signing key ID and PlatformCredential is signing key. +// - For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal +// is signing key ID and PlatformCredential is signing key. // -// * For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and -// the PlatformCredential is API key. +// - For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and +// the PlatformCredential is API key. // -// * For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential -// is private key. +// - For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential +// is private key. // -// * For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential -// is secret key. +// - For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential +// is secret key. 
// // You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint // action. @@ -381,14 +380,15 @@ func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationI // API operation CreatePlatformApplication for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformApplication func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) { @@ -428,14 +428,13 @@ const opCreatePlatformEndpoint = "CreatePlatformEndpoint" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreatePlatformEndpointRequest method. +// req, resp := client.CreatePlatformEndpointRequest(params) // -// // Example sending a request using the CreatePlatformEndpointRequest method. -// req, resp := client.CreatePlatformEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformEndpoint func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) { @@ -478,17 +477,18 @@ func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) // API operation CreatePlatformEndpoint for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreatePlatformEndpoint func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*CreatePlatformEndpointOutput, error) { @@ -528,14 +528,13 @@ const opCreateSMSSandboxPhoneNumber = "CreateSMSSandboxPhoneNumber" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateSMSSandboxPhoneNumberRequest method. +// req, resp := client.CreateSMSSandboxPhoneNumberRequest(params) // -// // Example sending a request using the CreateSMSSandboxPhoneNumberRequest method. -// req, resp := client.CreateSMSSandboxPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateSMSSandboxPhoneNumber func (c *SNS) CreateSMSSandboxPhoneNumberRequest(input *CreateSMSSandboxPhoneNumberInput) (req *request.Request, output *CreateSMSSandboxPhoneNumberOutput) { @@ -578,26 +577,27 @@ func (c *SNS) CreateSMSSandboxPhoneNumberRequest(input *CreateSMSSandboxPhoneNum // API operation CreateSMSSandboxPhoneNumber for usage and error information. // // Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeOptedOutException "OptedOut" -// Indicates that the specified phone number opted out of receiving SMS messages -// from your Amazon Web Services account. You can't send SMS messages to phone -// numbers that opt out. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeUserErrorException "UserError" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeOptedOutException "OptedOut" +// Indicates that the specified phone number opted out of receiving SMS messages +// from your Amazon Web Services account. You can't send SMS messages to phone +// numbers that opt out. // -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeUserErrorException "UserError" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateSMSSandboxPhoneNumber func (c *SNS) CreateSMSSandboxPhoneNumber(input *CreateSMSSandboxPhoneNumberInput) (*CreateSMSSandboxPhoneNumberOutput, error) { @@ -637,14 +637,13 @@ const opCreateTopic = "CreateTopic" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateTopicRequest method. +// req, resp := client.CreateTopicRequest(params) // -// // Example sending a request using the CreateTopicRequest method. -// req, resp := client.CreateTopicRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateTopic func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) { @@ -680,36 +679,37 @@ func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, // API operation CreateTopic for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeTopicLimitExceededException "TopicLimitExceeded" -// Indicates that the customer already owns the maximum allowed number of topics. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeTopicLimitExceededException "TopicLimitExceeded" +// Indicates that the customer already owns the maximum allowed number of topics. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. // -// * ErrCodeTagLimitExceededException "TagLimitExceeded" -// Can't add more than 50 tags to a topic. +// - ErrCodeTagLimitExceededException "TagLimitExceeded" +// Can't add more than 50 tags to a topic. // -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. +// - ErrCodeStaleTagException "StaleTag" +// A tag has been added to a resource with the same ARN as a deleted resource. +// Wait a short while and then retry the operation. // -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. +// - ErrCodeTagPolicyException "TagPolicy" +// The request doesn't comply with the IAM tag policy. Correct your request +// and then retry it. 
// -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. +// - ErrCodeConcurrentAccessException "ConcurrentAccess" +// Can't perform multiple operations on a tag simultaneously. Perform the operations +// sequentially. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/CreateTopic func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) { @@ -749,14 +749,13 @@ const opDeleteEndpoint = "DeleteEndpoint" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteEndpointRequest method. +// req, resp := client.DeleteEndpointRequest(params) // -// // Example sending a request using the DeleteEndpointRequest method. -// req, resp := client.DeleteEndpointRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteEndpoint func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { @@ -793,14 +792,15 @@ func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Re // API operation DeleteEndpoint for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteEndpoint func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { @@ -840,14 +840,13 @@ const opDeletePlatformApplication = "DeletePlatformApplication" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeletePlatformApplicationRequest method. +// req, resp := client.DeletePlatformApplicationRequest(params) // -// // Example sending a request using the DeletePlatformApplicationRequest method. 
-// req, resp := client.DeletePlatformApplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeletePlatformApplication func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) { @@ -881,14 +880,15 @@ func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationI // API operation DeletePlatformApplication for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. +// +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeletePlatformApplication func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) (*DeletePlatformApplicationOutput, error) { @@ -928,14 +928,13 @@ const opDeleteSMSSandboxPhoneNumber = "DeleteSMSSandboxPhoneNumber" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteSMSSandboxPhoneNumberRequest method. +// req, resp := client.DeleteSMSSandboxPhoneNumberRequest(params) // -// // Example sending a request using the DeleteSMSSandboxPhoneNumberRequest method. -// req, resp := client.DeleteSMSSandboxPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteSMSSandboxPhoneNumber func (c *SNS) DeleteSMSSandboxPhoneNumberRequest(input *DeleteSMSSandboxPhoneNumberInput) (req *request.Request, output *DeleteSMSSandboxPhoneNumberOutput) { @@ -978,25 +977,26 @@ func (c *SNS) DeleteSMSSandboxPhoneNumberRequest(input *DeleteSMSSandboxPhoneNum // API operation DeleteSMSSandboxPhoneNumber for usage and error information. // // Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. 
+// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. +// - ErrCodeResourceNotFoundException "ResourceNotFound" +// Can’t perform the action on the specified resource. Make sure that the +// resource exists. // -// * ErrCodeUserErrorException "UserError" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeUserErrorException "UserError" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteSMSSandboxPhoneNumber func (c *SNS) DeleteSMSSandboxPhoneNumber(input *DeleteSMSSandboxPhoneNumberInput) (*DeleteSMSSandboxPhoneNumberOutput, error) { @@ -1036,14 +1036,13 @@ const opDeleteTopic = "DeleteTopic" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteTopicRequest method. +// req, resp := client.DeleteTopicRequest(params) // -// // Example sending a request using the DeleteTopicRequest method. -// req, resp := client.DeleteTopicRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteTopic func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { @@ -1078,29 +1077,30 @@ func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, // API operation DeleteTopic for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. 
+// - ErrCodeStaleTagException "StaleTag" +// A tag has been added to a resource with the same ARN as a deleted resource. +// Wait a short while and then retry the operation. // -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. +// - ErrCodeTagPolicyException "TagPolicy" +// The request doesn't comply with the IAM tag policy. Correct your request +// and then retry it. // -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. +// - ErrCodeConcurrentAccessException "ConcurrentAccess" +// Can't perform multiple operations on a tag simultaneously. Perform the operations +// sequentially. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/DeleteTopic func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { @@ -1124,6 +1124,99 @@ func (c *SNS) DeleteTopicWithContext(ctx aws.Context, input *DeleteTopicInput, o return out, req.Send() } +const opGetDataProtectionPolicy = "GetDataProtectionPolicy" + +// GetDataProtectionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetDataProtectionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDataProtectionPolicy for more information on using the GetDataProtectionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetDataProtectionPolicyRequest method. +// req, resp := client.GetDataProtectionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetDataProtectionPolicy +func (c *SNS) GetDataProtectionPolicyRequest(input *GetDataProtectionPolicyInput) (req *request.Request, output *GetDataProtectionPolicyOutput) { + op := &request.Operation{ + Name: opGetDataProtectionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDataProtectionPolicyInput{} + } + + output = &GetDataProtectionPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDataProtectionPolicy API operation for Amazon Simple Notification Service. +// +// Retrieves the specified inline DataProtectionPolicy document that is stored +// in the specified Amazon SNS topic. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Notification Service's +// API operation GetDataProtectionPolicy for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. 
+// +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. +// +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetDataProtectionPolicy +func (c *SNS) GetDataProtectionPolicy(input *GetDataProtectionPolicyInput) (*GetDataProtectionPolicyOutput, error) { + req, out := c.GetDataProtectionPolicyRequest(input) + return out, req.Send() +} + +// GetDataProtectionPolicyWithContext is the same as GetDataProtectionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetDataProtectionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SNS) GetDataProtectionPolicyWithContext(ctx aws.Context, input *GetDataProtectionPolicyInput, opts ...request.Option) (*GetDataProtectionPolicyOutput, error) { + req, out := c.GetDataProtectionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetEndpointAttributes = "GetEndpointAttributes" // GetEndpointAttributesRequest generates a "aws/request.Request" representing the @@ -1140,14 +1233,13 @@ const opGetEndpointAttributes = "GetEndpointAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetEndpointAttributesRequest method. +// req, resp := client.GetEndpointAttributesRequest(params) // -// // Example sending a request using the GetEndpointAttributesRequest method. -// req, resp := client.GetEndpointAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetEndpointAttributes func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { @@ -1180,17 +1272,18 @@ func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (r // API operation GetEndpointAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. 
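GetDataProtectionPolicy and GetDataProtectionPolicyWithContext are newly added in this SDK revision. A minimal sketch of calling the context variant follows; the topic ARN is a placeholder, and the ResourceArn / DataProtectionPolicy field names are taken from the SNS API reference rather than from this diff:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	client := sns.New(session.Must(session.NewSession()))

	// The context must be non-nil; a nil context panics (see the doc comment above).
	out, err := client.GetDataProtectionPolicyWithContext(context.Background(), &sns.GetDataProtectionPolicyInput{
		ResourceArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"), // placeholder ARN
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response carries the topic's inline data protection policy document as a string.
	fmt.Println(aws.StringValue(out.DataProtectionPolicy))
}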
+// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetEndpointAttributes func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndpointAttributesOutput, error) { @@ -1230,14 +1323,13 @@ const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetPlatformApplicationAttributesRequest method. +// req, resp := client.GetPlatformApplicationAttributesRequest(params) // -// // Example sending a request using the GetPlatformApplicationAttributesRequest method. -// req, resp := client.GetPlatformApplicationAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetPlatformApplicationAttributes func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { @@ -1270,17 +1362,18 @@ func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicat // API operation GetPlatformApplicationAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetPlatformApplicationAttributes func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttributesInput) (*GetPlatformApplicationAttributesOutput, error) { @@ -1320,14 +1413,13 @@ const opGetSMSAttributes = "GetSMSAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSMSAttributesRequest method. +// req, resp := client.GetSMSAttributesRequest(params) // -// // Example sending a request using the GetSMSAttributesRequest method. 
-// req, resp := client.GetSMSAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSAttributes func (c *SNS) GetSMSAttributesRequest(input *GetSMSAttributesInput) (req *request.Request, output *GetSMSAttributesOutput) { @@ -1361,18 +1453,19 @@ func (c *SNS) GetSMSAttributesRequest(input *GetSMSAttributesInput) (req *reques // API operation GetSMSAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSAttributes func (c *SNS) GetSMSAttributes(input *GetSMSAttributesInput) (*GetSMSAttributesOutput, error) { @@ -1412,14 +1505,13 @@ const opGetSMSSandboxAccountStatus = "GetSMSSandboxAccountStatus" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSMSSandboxAccountStatusRequest method. +// req, resp := client.GetSMSSandboxAccountStatusRequest(params) // -// // Example sending a request using the GetSMSSandboxAccountStatusRequest method. -// req, resp := client.GetSMSSandboxAccountStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSSandboxAccountStatus func (c *SNS) GetSMSSandboxAccountStatusRequest(input *GetSMSSandboxAccountStatusInput) (req *request.Request, output *GetSMSSandboxAccountStatusOutput) { @@ -1461,15 +1553,16 @@ func (c *SNS) GetSMSSandboxAccountStatusRequest(input *GetSMSSandboxAccountStatu // API operation GetSMSSandboxAccountStatus for usage and error information. // // Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. 
+// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. +// +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSMSSandboxAccountStatus func (c *SNS) GetSMSSandboxAccountStatus(input *GetSMSSandboxAccountStatusInput) (*GetSMSSandboxAccountStatusOutput, error) { @@ -1509,14 +1602,13 @@ const opGetSubscriptionAttributes = "GetSubscriptionAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSubscriptionAttributesRequest method. +// req, resp := client.GetSubscriptionAttributesRequest(params) // -// // Example sending a request using the GetSubscriptionAttributesRequest method. -// req, resp := client.GetSubscriptionAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSubscriptionAttributes func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { @@ -1547,17 +1639,18 @@ func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesI // API operation GetSubscriptionAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetSubscriptionAttributes func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) (*GetSubscriptionAttributesOutput, error) { @@ -1597,14 +1690,13 @@ const opGetTopicAttributes = "GetTopicAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetTopicAttributesRequest method. 
+// req, resp := client.GetTopicAttributesRequest(params) // -// // Example sending a request using the GetTopicAttributesRequest method. -// req, resp := client.GetTopicAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetTopicAttributes func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { @@ -1636,21 +1728,22 @@ func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *re // API operation GetTopicAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/GetTopicAttributes func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttributesOutput, error) { @@ -1690,14 +1783,13 @@ const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListEndpointsByPlatformApplicationRequest method. +// req, resp := client.ListEndpointsByPlatformApplicationRequest(params) // -// // Example sending a request using the ListEndpointsByPlatformApplicationRequest method. 
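The "Returned Error Codes" lists above pair with the SDK's awserr.Error interface ("use runtime type assertions with awserr.Error's Code and Message methods"). A minimal sketch of inspecting those codes after GetTopicAttributes, assuming a placeholder topic ARN:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	client := sns.New(session.Must(session.NewSession()))

	out, err := client.GetTopicAttributes(&sns.GetTopicAttributesInput{
		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"), // placeholder ARN
	})
	if err != nil {
		// Runtime type assertion to recover the service error code, as the doc comments describe.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case sns.ErrCodeNotFoundException:
				fmt.Println("topic does not exist:", aerr.Message())
			case sns.ErrCodeAuthorizationErrorException:
				fmt.Println("access denied:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		}
		return
	}
	fmt.Println(out.Attributes)
}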
-// req, resp := client.ListEndpointsByPlatformApplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListEndpointsByPlatformApplication func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { @@ -1744,17 +1836,18 @@ func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPl // API operation ListEndpointsByPlatformApplication for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListEndpointsByPlatformApplication func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformApplicationInput) (*ListEndpointsByPlatformApplicationOutput, error) { @@ -1786,15 +1879,14 @@ func (c *SNS) ListEndpointsByPlatformApplicationWithContext(ctx aws.Context, inp // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListEndpointsByPlatformApplication operation. -// pageNum := 0 -// err := client.ListEndpointsByPlatformApplicationPages(params, -// func(page *sns.ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListEndpointsByPlatformApplication operation. +// pageNum := 0 +// err := client.ListEndpointsByPlatformApplicationPages(params, +// func(page *sns.ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(*ListEndpointsByPlatformApplicationOutput, bool) bool) error { return c.ListEndpointsByPlatformApplicationPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -1846,14 +1938,13 @@ const opListOriginationNumbers = "ListOriginationNumbers" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListOriginationNumbersRequest method. 
+// req, resp := client.ListOriginationNumbersRequest(params) // -// // Example sending a request using the ListOriginationNumbersRequest method. -// req, resp := client.ListOriginationNumbersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListOriginationNumbers func (c *SNS) ListOriginationNumbersRequest(input *ListOriginationNumbersInput) (req *request.Request, output *ListOriginationNumbersOutput) { @@ -1893,21 +1984,22 @@ func (c *SNS) ListOriginationNumbersRequest(input *ListOriginationNumbersInput) // API operation ListOriginationNumbers for usage and error information. // // Returned Error Codes: -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeValidationException "ValidationException" -// Indicates that a parameter in the request is invalid. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeValidationException "ValidationException" +// Indicates that a parameter in the request is invalid. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListOriginationNumbers func (c *SNS) ListOriginationNumbers(input *ListOriginationNumbersInput) (*ListOriginationNumbersOutput, error) { @@ -1939,15 +2031,14 @@ func (c *SNS) ListOriginationNumbersWithContext(ctx aws.Context, input *ListOrig // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListOriginationNumbers operation. -// pageNum := 0 -// err := client.ListOriginationNumbersPages(params, -// func(page *sns.ListOriginationNumbersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListOriginationNumbers operation. 
+// pageNum := 0 +// err := client.ListOriginationNumbersPages(params, +// func(page *sns.ListOriginationNumbersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListOriginationNumbersPages(input *ListOriginationNumbersInput, fn func(*ListOriginationNumbersOutput, bool) bool) error { return c.ListOriginationNumbersPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -1999,14 +2090,13 @@ const opListPhoneNumbersOptedOut = "ListPhoneNumbersOptedOut" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListPhoneNumbersOptedOutRequest method. +// req, resp := client.ListPhoneNumbersOptedOutRequest(params) // -// // Example sending a request using the ListPhoneNumbersOptedOutRequest method. -// req, resp := client.ListPhoneNumbersOptedOutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPhoneNumbersOptedOut func (c *SNS) ListPhoneNumbersOptedOutRequest(input *ListPhoneNumbersOptedOutInput) (req *request.Request, output *ListPhoneNumbersOptedOutOutput) { @@ -2051,18 +2141,19 @@ func (c *SNS) ListPhoneNumbersOptedOutRequest(input *ListPhoneNumbersOptedOutInp // API operation ListPhoneNumbersOptedOut for usage and error information. // // Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPhoneNumbersOptedOut func (c *SNS) ListPhoneNumbersOptedOut(input *ListPhoneNumbersOptedOutInput) (*ListPhoneNumbersOptedOutOutput, error) { @@ -2094,15 +2185,14 @@ func (c *SNS) ListPhoneNumbersOptedOutWithContext(ctx aws.Context, input *ListPh // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListPhoneNumbersOptedOut operation. -// pageNum := 0 -// err := client.ListPhoneNumbersOptedOutPages(params, -// func(page *sns.ListPhoneNumbersOptedOutOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListPhoneNumbersOptedOut operation. 
+// pageNum := 0 +// err := client.ListPhoneNumbersOptedOutPages(params, +// func(page *sns.ListPhoneNumbersOptedOutOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListPhoneNumbersOptedOutPages(input *ListPhoneNumbersOptedOutInput, fn func(*ListPhoneNumbersOptedOutOutput, bool) bool) error { return c.ListPhoneNumbersOptedOutPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -2154,14 +2244,13 @@ const opListPlatformApplications = "ListPlatformApplications" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListPlatformApplicationsRequest method. +// req, resp := client.ListPlatformApplicationsRequest(params) // -// // Example sending a request using the ListPlatformApplicationsRequest method. -// req, resp := client.ListPlatformApplicationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPlatformApplications func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { @@ -2207,14 +2296,15 @@ func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInp // API operation ListPlatformApplications for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListPlatformApplications func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*ListPlatformApplicationsOutput, error) { @@ -2246,15 +2336,14 @@ func (c *SNS) ListPlatformApplicationsWithContext(ctx aws.Context, input *ListPl // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListPlatformApplications operation. -// pageNum := 0 -// err := client.ListPlatformApplicationsPages(params, -// func(page *sns.ListPlatformApplicationsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListPlatformApplications operation. 
+// pageNum := 0 +// err := client.ListPlatformApplicationsPages(params, +// func(page *sns.ListPlatformApplicationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(*ListPlatformApplicationsOutput, bool) bool) error { return c.ListPlatformApplicationsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -2306,14 +2395,13 @@ const opListSMSSandboxPhoneNumbers = "ListSMSSandboxPhoneNumbers" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListSMSSandboxPhoneNumbersRequest method. +// req, resp := client.ListSMSSandboxPhoneNumbersRequest(params) // -// // Example sending a request using the ListSMSSandboxPhoneNumbersRequest method. -// req, resp := client.ListSMSSandboxPhoneNumbersRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSMSSandboxPhoneNumbers func (c *SNS) ListSMSSandboxPhoneNumbersRequest(input *ListSMSSandboxPhoneNumbersInput) (req *request.Request, output *ListSMSSandboxPhoneNumbersOutput) { @@ -2361,22 +2449,23 @@ func (c *SNS) ListSMSSandboxPhoneNumbersRequest(input *ListSMSSandboxPhoneNumber // API operation ListSMSSandboxPhoneNumbers for usage and error information. // // Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. +// - ErrCodeResourceNotFoundException "ResourceNotFound" +// Can’t perform the action on the specified resource. Make sure that the +// resource exists. // -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSMSSandboxPhoneNumbers func (c *SNS) ListSMSSandboxPhoneNumbers(input *ListSMSSandboxPhoneNumbersInput) (*ListSMSSandboxPhoneNumbersOutput, error) { @@ -2408,15 +2497,14 @@ func (c *SNS) ListSMSSandboxPhoneNumbersWithContext(ctx aws.Context, input *List // // Note: This operation can generate multiple requests to a service. 
// -// // Example iterating over at most 3 pages of a ListSMSSandboxPhoneNumbers operation. -// pageNum := 0 -// err := client.ListSMSSandboxPhoneNumbersPages(params, -// func(page *sns.ListSMSSandboxPhoneNumbersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListSMSSandboxPhoneNumbers operation. +// pageNum := 0 +// err := client.ListSMSSandboxPhoneNumbersPages(params, +// func(page *sns.ListSMSSandboxPhoneNumbersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListSMSSandboxPhoneNumbersPages(input *ListSMSSandboxPhoneNumbersInput, fn func(*ListSMSSandboxPhoneNumbersOutput, bool) bool) error { return c.ListSMSSandboxPhoneNumbersPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -2468,14 +2556,13 @@ const opListSubscriptions = "ListSubscriptions" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListSubscriptionsRequest method. +// req, resp := client.ListSubscriptionsRequest(params) // -// // Example sending a request using the ListSubscriptionsRequest method. -// req, resp := client.ListSubscriptionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptions func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { @@ -2517,14 +2604,15 @@ func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *requ // API operation ListSubscriptions for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. +// +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptions func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptionsOutput, error) { @@ -2556,15 +2644,14 @@ func (c *SNS) ListSubscriptionsWithContext(ctx aws.Context, input *ListSubscript // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListSubscriptions operation. -// pageNum := 0 -// err := client.ListSubscriptionsPages(params, -// func(page *sns.ListSubscriptionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListSubscriptions operation. 
+// pageNum := 0 +// err := client.ListSubscriptionsPages(params, +// func(page *sns.ListSubscriptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(*ListSubscriptionsOutput, bool) bool) error { return c.ListSubscriptionsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -2616,14 +2703,13 @@ const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListSubscriptionsByTopicRequest method. +// req, resp := client.ListSubscriptionsByTopicRequest(params) // -// // Example sending a request using the ListSubscriptionsByTopicRequest method. -// req, resp := client.ListSubscriptionsByTopicRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptionsByTopic func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { @@ -2665,17 +2751,18 @@ func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInp // API operation ListSubscriptionsByTopic for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. +// +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListSubscriptionsByTopic func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*ListSubscriptionsByTopicOutput, error) { @@ -2707,15 +2794,14 @@ func (c *SNS) ListSubscriptionsByTopicWithContext(ctx aws.Context, input *ListSu // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListSubscriptionsByTopic operation. -// pageNum := 0 -// err := client.ListSubscriptionsByTopicPages(params, -// func(page *sns.ListSubscriptionsByTopicOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListSubscriptionsByTopic operation. 
+// pageNum := 0 +// err := client.ListSubscriptionsByTopicPages(params, +// func(page *sns.ListSubscriptionsByTopicOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(*ListSubscriptionsByTopicOutput, bool) bool) error { return c.ListSubscriptionsByTopicPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -2767,14 +2853,13 @@ const opListTagsForResource = "ListTagsForResource" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTagsForResource func (c *SNS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { @@ -2807,23 +2892,24 @@ func (c *SNS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req * // API operation ListTagsForResource for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. // -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. +// - ErrCodeResourceNotFoundException "ResourceNotFound" +// Can’t perform the action on the specified resource. Make sure that the +// resource exists. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeTagPolicyException "TagPolicy" +// The request doesn't comply with the IAM tag policy. Correct your request +// and then retry it. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeConcurrentAccessException "ConcurrentAccess" +// Can't perform multiple operations on a tag simultaneously. Perform the operations +// sequentially. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTagsForResource func (c *SNS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { @@ -2863,14 +2949,13 @@ const opListTopics = "ListTopics" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// +// // Example sending a request using the ListTopicsRequest method. +// req, resp := client.ListTopicsRequest(params) // -// // Example sending a request using the ListTopicsRequest method. -// req, resp := client.ListTopicsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTopics func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { @@ -2911,14 +2996,15 @@ func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, o // API operation ListTopics for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/ListTopics func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { @@ -2950,15 +3036,14 @@ func (c *SNS) ListTopicsWithContext(ctx aws.Context, input *ListTopicsInput, opt // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListTopics operation. -// pageNum := 0 -// err := client.ListTopicsPages(params, -// func(page *sns.ListTopicsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListTopics operation. +// pageNum := 0 +// err := client.ListTopicsPages(params, +// func(page *sns.ListTopicsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(*ListTopicsOutput, bool) bool) error { return c.ListTopicsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -3010,14 +3095,13 @@ const opOptInPhoneNumber = "OptInPhoneNumber" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the OptInPhoneNumberRequest method. +// req, resp := client.OptInPhoneNumberRequest(params) // -// // Example sending a request using the OptInPhoneNumberRequest method. 
-// req, resp := client.OptInPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/OptInPhoneNumber func (c *SNS) OptInPhoneNumberRequest(input *OptInPhoneNumberInput) (req *request.Request, output *OptInPhoneNumberOutput) { @@ -3052,18 +3136,19 @@ func (c *SNS) OptInPhoneNumberRequest(input *OptInPhoneNumberInput) (req *reques // API operation OptInPhoneNumber for usage and error information. // // Returned Error Codes: -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/OptInPhoneNumber func (c *SNS) OptInPhoneNumber(input *OptInPhoneNumberInput) (*OptInPhoneNumberOutput, error) { @@ -3103,14 +3188,13 @@ const opPublish = "Publish" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the PublishRequest method. +// req, resp := client.PublishRequest(params) // -// // Example sending a request using the PublishRequest method. -// req, resp := client.PublishRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Publish func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { @@ -3161,56 +3245,60 @@ func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output // API operation Publish for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInvalidParameterValueException "ParameterValueInvalid" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. 
+// +// - ErrCodeInvalidParameterValueException "ParameterValueInvalid" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeEndpointDisabledException "EndpointDisabled" -// Exception error indicating endpoint disabled. +// - ErrCodeEndpointDisabledException "EndpointDisabled" +// Exception error indicating endpoint disabled. // -// * ErrCodePlatformApplicationDisabledException "PlatformApplicationDisabled" -// Exception error indicating platform application disabled. +// - ErrCodePlatformApplicationDisabledException "PlatformApplicationDisabled" +// Exception error indicating platform application disabled. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeKMSDisabledException "KMSDisabled" -// The request was rejected because the specified customer master key (CMK) -// isn't enabled. +// - ErrCodeKMSDisabledException "KMSDisabled" +// The request was rejected because the specified customer master key (CMK) +// isn't enabled. // -// * ErrCodeKMSInvalidStateException "KMSInvalidState" -// The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. +// - ErrCodeKMSInvalidStateException "KMSInvalidState" +// The request was rejected because the state of the specified resource isn't +// valid for this request. For more information, see How Key State Affects Use +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. // -// * ErrCodeKMSNotFoundException "KMSNotFound" -// The request was rejected because the specified entity or resource can't be -// found. +// - ErrCodeKMSNotFoundException "KMSNotFound" +// The request was rejected because the specified entity or resource can't be +// found. // -// * ErrCodeKMSOptInRequired "KMSOptInRequired" -// The Amazon Web Services access key ID needs a subscription for the service. +// - ErrCodeKMSOptInRequired "KMSOptInRequired" +// The Amazon Web Services access key ID needs a subscription for the service. // -// * ErrCodeKMSThrottlingException "KMSThrottling" -// The request was denied due to request throttling. For more information about -// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) -// in the Key Management Service Developer Guide. +// - ErrCodeKMSThrottlingException "KMSThrottling" +// The request was denied due to request throttling. For more information about +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// in the Key Management Service Developer Guide. 
// -// * ErrCodeKMSAccessDeniedException "KMSAccessDenied" -// The ciphertext references a key that doesn't exist or that you don't have -// access to. +// - ErrCodeKMSAccessDeniedException "KMSAccessDenied" +// The ciphertext references a key that doesn't exist or that you don't have +// access to. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. +// +// - ErrCodeValidationException "ValidationException" +// Indicates that a parameter in the request is invalid. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Publish func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { @@ -3250,14 +3338,13 @@ const opPublishBatch = "PublishBatch" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the PublishBatchRequest method. +// req, resp := client.PublishBatchRequest(params) // -// // Example sending a request using the PublishBatchRequest method. -// req, resp := client.PublishBatchRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/PublishBatch func (c *SNS) PublishBatchRequest(input *PublishBatchInput) (req *request.Request, output *PublishBatchOutput) { @@ -3315,71 +3402,75 @@ func (c *SNS) PublishBatchRequest(input *PublishBatchInput) (req *request.Reques // API operation PublishBatch for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInvalidParameterValueException "ParameterValueInvalid" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterValueException "ParameterValueInvalid" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeEndpointDisabledException "EndpointDisabled" -// Exception error indicating endpoint disabled. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodePlatformApplicationDisabledException "PlatformApplicationDisabled" -// Exception error indicating platform application disabled. +// - ErrCodeEndpointDisabledException "EndpointDisabled" +// Exception error indicating endpoint disabled. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. 
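This hunk adds ErrCodeValidationException to Publish's documented error codes. A hedged sketch of publishing a message and branching on the returned awserr.Error code; the topic ARN is a placeholder.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	client := sns.New(session.Must(session.NewSession()))

    	out, err := client.Publish(&sns.PublishInput{
    		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"), // placeholder ARN
    		Message:  aws.String("hello from the SDK"),
    	})
    	if err != nil {
    		// The doc comment lists the service error codes; awserr exposes them at runtime.
    		if aerr, ok := err.(awserr.Error); ok {
    			switch aerr.Code() {
    			case sns.ErrCodeNotFoundException:
    				log.Fatalf("topic does not exist: %s", aerr.Message())
    			case sns.ErrCodeValidationException:
    				log.Fatalf("invalid request parameter: %s", aerr.Message())
    			default:
    				log.Fatalf("publish failed: %v", aerr)
    			}
    		}
    		log.Fatal(err)
    	}
    	fmt.Println("message id:", aws.StringValue(out.MessageId))
    }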
+// - ErrCodePlatformApplicationDisabledException "PlatformApplicationDisabled" +// Exception error indicating platform application disabled. // -// * ErrCodeBatchEntryIdsNotDistinctException "BatchEntryIdsNotDistinct" -// Two or more batch entries in the request have the same Id. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeBatchRequestTooLongException "BatchRequestTooLong" -// The length of all the batch messages put together is more than the limit. +// - ErrCodeBatchEntryIdsNotDistinctException "BatchEntryIdsNotDistinct" +// Two or more batch entries in the request have the same Id. // -// * ErrCodeEmptyBatchRequestException "EmptyBatchRequest" -// The batch request doesn't contain any entries. +// - ErrCodeBatchRequestTooLongException "BatchRequestTooLong" +// The length of all the batch messages put together is more than the limit. // -// * ErrCodeInvalidBatchEntryIdException "InvalidBatchEntryId" -// The Id of a batch entry in a batch request doesn't abide by the specification. +// - ErrCodeEmptyBatchRequestException "EmptyBatchRequest" +// The batch request doesn't contain any entries. // -// * ErrCodeTooManyEntriesInBatchRequestException "TooManyEntriesInBatchRequest" -// The batch request contains more entries than permissible. +// - ErrCodeInvalidBatchEntryIdException "InvalidBatchEntryId" +// The Id of a batch entry in a batch request doesn't abide by the specification. // -// * ErrCodeKMSDisabledException "KMSDisabled" -// The request was rejected because the specified customer master key (CMK) -// isn't enabled. +// - ErrCodeTooManyEntriesInBatchRequestException "TooManyEntriesInBatchRequest" +// The batch request contains more entries than permissible. // -// * ErrCodeKMSInvalidStateException "KMSInvalidState" -// The request was rejected because the state of the specified resource isn't -// valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. +// - ErrCodeKMSDisabledException "KMSDisabled" +// The request was rejected because the specified customer master key (CMK) +// isn't enabled. // -// * ErrCodeKMSNotFoundException "KMSNotFound" -// The request was rejected because the specified entity or resource can't be -// found. +// - ErrCodeKMSInvalidStateException "KMSInvalidState" +// The request was rejected because the state of the specified resource isn't +// valid for this request. For more information, see How Key State Affects Use +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the Key Management Service Developer Guide. // -// * ErrCodeKMSOptInRequired "KMSOptInRequired" -// The Amazon Web Services access key ID needs a subscription for the service. +// - ErrCodeKMSNotFoundException "KMSNotFound" +// The request was rejected because the specified entity or resource can't be +// found. // -// * ErrCodeKMSThrottlingException "KMSThrottling" -// The request was denied due to request throttling. For more information about -// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) -// in the Key Management Service Developer Guide. +// - ErrCodeKMSOptInRequired "KMSOptInRequired" +// The Amazon Web Services access key ID needs a subscription for the service. 
// -// * ErrCodeKMSAccessDeniedException "KMSAccessDenied" -// The ciphertext references a key that doesn't exist or that you don't have -// access to. +// - ErrCodeKMSThrottlingException "KMSThrottling" +// The request was denied due to request throttling. For more information about +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// in the Key Management Service Developer Guide. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeKMSAccessDeniedException "KMSAccessDenied" +// The ciphertext references a key that doesn't exist or that you don't have +// access to. +// +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. +// +// - ErrCodeValidationException "ValidationException" +// Indicates that a parameter in the request is invalid. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/PublishBatch func (c *SNS) PublishBatch(input *PublishBatchInput) (*PublishBatchOutput, error) { @@ -3403,6 +3494,100 @@ func (c *SNS) PublishBatchWithContext(ctx aws.Context, input *PublishBatchInput, return out, req.Send() } +const opPutDataProtectionPolicy = "PutDataProtectionPolicy" + +// PutDataProtectionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDataProtectionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutDataProtectionPolicy for more information on using the PutDataProtectionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutDataProtectionPolicyRequest method. +// req, resp := client.PutDataProtectionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/PutDataProtectionPolicy +func (c *SNS) PutDataProtectionPolicyRequest(input *PutDataProtectionPolicyInput) (req *request.Request, output *PutDataProtectionPolicyOutput) { + op := &request.Operation{ + Name: opPutDataProtectionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutDataProtectionPolicyInput{} + } + + output = &PutDataProtectionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutDataProtectionPolicy API operation for Amazon Simple Notification Service. +// +// Adds or updates an inline policy document that is stored in the specified +// Amazon SNS topic. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Notification Service's +// API operation PutDataProtectionPolicy for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. +// +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. +// +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/PutDataProtectionPolicy +func (c *SNS) PutDataProtectionPolicy(input *PutDataProtectionPolicyInput) (*PutDataProtectionPolicyOutput, error) { + req, out := c.PutDataProtectionPolicyRequest(input) + return out, req.Send() +} + +// PutDataProtectionPolicyWithContext is the same as PutDataProtectionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutDataProtectionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SNS) PutDataProtectionPolicyWithContext(ctx aws.Context, input *PutDataProtectionPolicyInput, opts ...request.Option) (*PutDataProtectionPolicyOutput, error) { + req, out := c.PutDataProtectionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRemovePermission = "RemovePermission" // RemovePermissionRequest generates a "aws/request.Request" representing the @@ -3419,14 +3604,13 @@ const opRemovePermission = "RemovePermission" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the RemovePermissionRequest method. +// req, resp := client.RemovePermissionRequest(params) // -// // Example sending a request using the RemovePermissionRequest method. -// req, resp := client.RemovePermissionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/RemovePermission func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { @@ -3458,17 +3642,18 @@ func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *reques // API operation RemovePermission for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. 
+// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/RemovePermission func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) { @@ -3508,14 +3693,13 @@ const opSetEndpointAttributes = "SetEndpointAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the SetEndpointAttributesRequest method. +// req, resp := client.SetEndpointAttributesRequest(params) // -// // Example sending a request using the SetEndpointAttributesRequest method. -// req, resp := client.SetEndpointAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetEndpointAttributes func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { @@ -3549,17 +3733,18 @@ func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (r // API operation SetEndpointAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetEndpointAttributes func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndpointAttributesOutput, error) { @@ -3599,14 +3784,13 @@ const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
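A brief sketch of SetEndpointAttributes under the same request pattern; the endpoint ARN is a placeholder, and "Enabled" is assumed here as the commonly used attribute for re-enabling delivery to a platform endpoint.

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	client := sns.New(session.Must(session.NewSession()))

    	// Re-enable delivery to a platform application endpoint.
    	_, err := client.SetEndpointAttributes(&sns.SetEndpointAttributesInput{
    		EndpointArn: aws.String("arn:aws:sns:us-east-1:123456789012:endpoint/GCM/example-app/00000000-0000-0000-0000-000000000000"), // placeholder
    		Attributes: map[string]*string{
    			"Enabled": aws.String("true"),
    		},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }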
// +// // Example sending a request using the SetPlatformApplicationAttributesRequest method. +// req, resp := client.SetPlatformApplicationAttributesRequest(params) // -// // Example sending a request using the SetPlatformApplicationAttributesRequest method. -// req, resp := client.SetPlatformApplicationAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetPlatformApplicationAttributes func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { @@ -3642,17 +3826,18 @@ func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicat // API operation SetPlatformApplicationAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetPlatformApplicationAttributes func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { @@ -3692,14 +3877,13 @@ const opSetSMSAttributes = "SetSMSAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the SetSMSAttributesRequest method. +// req, resp := client.SetSMSAttributesRequest(params) // -// // Example sending a request using the SetSMSAttributesRequest method. -// req, resp := client.SetSMSAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSMSAttributes func (c *SNS) SetSMSAttributesRequest(input *SetSMSAttributesInput) (req *request.Request, output *SetSMSAttributesOutput) { @@ -3740,18 +3924,19 @@ func (c *SNS) SetSMSAttributesRequest(input *SetSMSAttributesInput) (req *reques // API operation SetSMSAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. 
// -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSMSAttributes func (c *SNS) SetSMSAttributes(input *SetSMSAttributesInput) (*SetSMSAttributesOutput, error) { @@ -3791,14 +3976,13 @@ const opSetSubscriptionAttributes = "SetSubscriptionAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the SetSubscriptionAttributesRequest method. +// req, resp := client.SetSubscriptionAttributesRequest(params) // -// // Example sending a request using the SetSubscriptionAttributesRequest method. -// req, resp := client.SetSubscriptionAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSubscriptionAttributes func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) { @@ -3831,22 +4015,23 @@ func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesI // API operation SetSubscriptionAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" -// Indicates that the number of filter polices in your Amazon Web Services account -// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit -// Increase case in the Amazon Web Services Support Center. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" +// Indicates that the number of filter polices in your Amazon Web Services account +// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit +// Increase case in the Amazon Web Services Support Center. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. 
// -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetSubscriptionAttributes func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) (*SetSubscriptionAttributesOutput, error) { @@ -3886,14 +4071,13 @@ const opSetTopicAttributes = "SetTopicAttributes" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the SetTopicAttributesRequest method. +// req, resp := client.SetTopicAttributesRequest(params) // -// // Example sending a request using the SetTopicAttributesRequest method. -// req, resp := client.SetTopicAttributesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetTopicAttributes func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) { @@ -3925,21 +4109,22 @@ func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *re // API operation SetTopicAttributes for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/SetTopicAttributes func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttributesOutput, error) { @@ -3979,14 +4164,13 @@ const opSubscribe = "Subscribe" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // +// // Example sending a request using the SubscribeRequest method. +// req, resp := client.SubscribeRequest(params) // -// // Example sending a request using the SubscribeRequest method. -// req, resp := client.SubscribeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Subscribe func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) { @@ -4025,29 +4209,30 @@ func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, out // API operation Subscribe for usage and error information. // // Returned Error Codes: -// * ErrCodeSubscriptionLimitExceededException "SubscriptionLimitExceeded" -// Indicates that the customer already owns the maximum allowed number of subscriptions. // -// * ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" -// Indicates that the number of filter polices in your Amazon Web Services account -// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit -// Increase case in the Amazon Web Services Support Center. +// - ErrCodeSubscriptionLimitExceededException "SubscriptionLimitExceeded" +// Indicates that the customer already owns the maximum allowed number of subscriptions. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeFilterPolicyLimitExceededException "FilterPolicyLimitExceeded" +// Indicates that the number of filter polices in your Amazon Web Services account +// exceeds the limit. To add more filter polices, submit an Amazon SNS Limit +// Increase case in the Amazon Web Services Support Center. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Subscribe func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) { @@ -4087,14 +4272,13 @@ const opTagResource = "TagResource" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
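A short sketch of the Subscribe operation documented above; the topic and queue ARNs are placeholders, and ReturnSubscriptionArn is assumed as the standard flag for getting the ARN back immediately.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	client := sns.New(session.Must(session.NewSession()))

    	out, err := client.Subscribe(&sns.SubscribeInput{
    		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"), // placeholder
    		Protocol: aws.String("sqs"),
    		Endpoint: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // placeholder
    		// Ask the service to return the subscription ARN even before confirmation.
    		ReturnSubscriptionArn: aws.Bool(true),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("subscription arn:", aws.StringValue(out.SubscriptionArn))
    }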
// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/TagResource func (c *SNS) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { @@ -4122,19 +4306,19 @@ func (c *SNS) TagResourceRequest(input *TagResourceInput) (req *request.Request, // // When you use topic tags, keep the following guidelines in mind: // -// * Adding more than 50 tags to a topic isn't recommended. +// - Adding more than 50 tags to a topic isn't recommended. // -// * Tags don't have any semantic meaning. Amazon SNS interprets tags as -// character strings. +// - Tags don't have any semantic meaning. Amazon SNS interprets tags as +// character strings. // -// * Tags are case-sensitive. +// - Tags are case-sensitive. // -// * A new tag with a key identical to that of an existing tag overwrites -// the existing tag. +// - A new tag with a key identical to that of an existing tag overwrites +// the existing tag. // -// * Tagging actions are limited to 10 TPS per Amazon Web Services account, -// per Amazon Web Services Region. If your application requires a higher -// throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). +// - Tagging actions are limited to 10 TPS per Amazon Web Services account, +// per Amazon Web Services Region. If your application requires a higher +// throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4144,30 +4328,31 @@ func (c *SNS) TagResourceRequest(input *TagResourceInput) (req *request.Request, // API operation TagResource for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. // -// * ErrCodeTagLimitExceededException "TagLimitExceeded" -// Can't add more than 50 tags to a topic. +// - ErrCodeResourceNotFoundException "ResourceNotFound" +// Can’t perform the action on the specified resource. Make sure that the +// resource exists. +// +// - ErrCodeTagLimitExceededException "TagLimitExceeded" +// Can't add more than 50 tags to a topic. // -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. +// - ErrCodeStaleTagException "StaleTag" +// A tag has been added to a resource with the same ARN as a deleted resource. +// Wait a short while and then retry the operation. // -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. +// - ErrCodeTagPolicyException "TagPolicy" +// The request doesn't comply with the IAM tag policy. Correct your request +// and then retry it. 
// -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. +// - ErrCodeConcurrentAccessException "ConcurrentAccess" +// Can't perform multiple operations on a tag simultaneously. Perform the operations +// sequentially. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/TagResource func (c *SNS) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -4207,14 +4392,13 @@ const opUnsubscribe = "Unsubscribe" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UnsubscribeRequest method. +// req, resp := client.UnsubscribeRequest(params) // -// // Example sending a request using the UnsubscribeRequest method. -// req, resp := client.UnsubscribeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Unsubscribe func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) { @@ -4254,21 +4438,22 @@ func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, // API operation Unsubscribe for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. +// +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeNotFoundException "NotFound" -// Indicates that the requested resource does not exist. +// - ErrCodeNotFoundException "NotFound" +// Indicates that the requested resource does not exist. // -// * ErrCodeInvalidSecurityException "InvalidSecurity" -// The credential signature isn't valid. You must use an HTTPS endpoint and -// sign your request using Signature Version 4. +// - ErrCodeInvalidSecurityException "InvalidSecurity" +// The credential signature isn't valid. You must use an HTTPS endpoint and +// sign your request using Signature Version 4. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Unsubscribe func (c *SNS) Unsubscribe(input *UnsubscribeInput) (*UnsubscribeOutput, error) { @@ -4308,14 +4493,13 @@ const opUntagResource = "UntagResource" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/UntagResource func (c *SNS) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { @@ -4349,30 +4533,31 @@ func (c *SNS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ // API operation UntagResource for usage and error information. // // Returned Error Codes: -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. // -// * ErrCodeTagLimitExceededException "TagLimitExceeded" -// Can't add more than 50 tags to a topic. +// - ErrCodeResourceNotFoundException "ResourceNotFound" +// Can’t perform the action on the specified resource. Make sure that the +// resource exists. // -// * ErrCodeStaleTagException "StaleTag" -// A tag has been added to a resource with the same ARN as a deleted resource. -// Wait a short while and then retry the operation. +// - ErrCodeTagLimitExceededException "TagLimitExceeded" +// Can't add more than 50 tags to a topic. // -// * ErrCodeTagPolicyException "TagPolicy" -// The request doesn't comply with the IAM tag policy. Correct your request -// and then retry it. +// - ErrCodeStaleTagException "StaleTag" +// A tag has been added to a resource with the same ARN as a deleted resource. +// Wait a short while and then retry the operation. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeTagPolicyException "TagPolicy" +// The request doesn't comply with the IAM tag policy. Correct your request +// and then retry it. // -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeConcurrentAccessException "ConcurrentAccess" -// Can't perform multiple operations on a tag simultaneously. Perform the operations -// sequentially. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. +// +// - ErrCodeConcurrentAccessException "ConcurrentAccess" +// Can't perform multiple operations on a tag simultaneously. Perform the operations +// sequentially. 
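The tagging guidelines and error codes above translate into a small sketch covering TagResource and UntagResource; the ARN and tag values are placeholders.

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	client := sns.New(session.Must(session.NewSession()))
    	topicArn := aws.String("arn:aws:sns:us-east-1:123456789012:example-topic") // placeholder

    	// Tag keys are case-sensitive and a duplicate key overwrites the existing tag,
    	// per the TagResource guidelines above.
    	_, err := client.TagResource(&sns.TagResourceInput{
    		ResourceArn: topicArn,
    		Tags: []*sns.Tag{
    			{Key: aws.String("team"), Value: aws.String("platform")},
    		},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Remove the tag again by key.
    	if _, err := client.UntagResource(&sns.UntagResourceInput{
    		ResourceArn: topicArn,
    		TagKeys:     []*string{aws.String("team")},
    	}); err != nil {
    		log.Fatal(err)
    	}
    }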
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/UntagResource func (c *SNS) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -4412,14 +4597,13 @@ const opVerifySMSSandboxPhoneNumber = "VerifySMSSandboxPhoneNumber" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the VerifySMSSandboxPhoneNumberRequest method. +// req, resp := client.VerifySMSSandboxPhoneNumberRequest(params) // -// // Example sending a request using the VerifySMSSandboxPhoneNumberRequest method. -// req, resp := client.VerifySMSSandboxPhoneNumberRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/VerifySMSSandboxPhoneNumber func (c *SNS) VerifySMSSandboxPhoneNumberRequest(input *VerifySMSSandboxPhoneNumberInput) (req *request.Request, output *VerifySMSSandboxPhoneNumberOutput) { @@ -4462,25 +4646,26 @@ func (c *SNS) VerifySMSSandboxPhoneNumberRequest(input *VerifySMSSandboxPhoneNum // API operation VerifySMSSandboxPhoneNumber for usage and error information. // // Returned Error Codes: -// * ErrCodeAuthorizationErrorException "AuthorizationError" -// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInternalErrorException "InternalError" -// Indicates an internal service error. +// - ErrCodeAuthorizationErrorException "AuthorizationError" +// Indicates that the user has been denied access to the requested resource. // -// * ErrCodeInvalidParameterException "InvalidParameter" -// Indicates that a request parameter does not comply with the associated constraints. +// - ErrCodeInternalErrorException "InternalError" +// Indicates an internal service error. // -// * ErrCodeResourceNotFoundException "ResourceNotFound" -// Can’t perform the action on the specified resource. Make sure that the -// resource exists. +// - ErrCodeInvalidParameterException "InvalidParameter" +// Indicates that a request parameter does not comply with the associated constraints. // -// * ErrCodeVerificationException "VerificationException" -// Indicates that the one-time password (OTP) used for verification is invalid. +// - ErrCodeResourceNotFoundException "ResourceNotFound" +// Can’t perform the action on the specified resource. Make sure that the +// resource exists. // -// * ErrCodeThrottledException "Throttled" -// Indicates that the rate at which requests have been submitted for this action -// exceeds the limit for your Amazon Web Services account. +// - ErrCodeVerificationException "VerificationException" +// Indicates that the one-time password (OTP) used for verification is invalid. +// +// - ErrCodeThrottledException "Throttled" +// Indicates that the rate at which requests have been submitted for this action +// exceeds the limit for your Amazon Web Services account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/VerifySMSSandboxPhoneNumber func (c *SNS) VerifySMSSandboxPhoneNumber(input *VerifySMSSandboxPhoneNumberInput) (*VerifySMSSandboxPhoneNumberOutput, error) { @@ -5223,6 +5408,15 @@ type CreateTopicInput struct { // parameter for the Publish action. 
Attributes map[string]*string `type:"map"` + // The body of the policy document you want to use for this topic. + // + // You can only add one policy per topic. + // + // The policy must be in JSON string format. + // + // Length Constraints: Maximum length of 30,720. + DataProtectionPolicy *string `type:"string"` + // The name of the topic you want to create. // // Constraints: Topic names must be made up of only uppercase and lowercase @@ -5288,6 +5482,12 @@ func (s *CreateTopicInput) SetAttributes(v map[string]*string) *CreateTopicInput return s } +// SetDataProtectionPolicy sets the DataProtectionPolicy field's value. +func (s *CreateTopicInput) SetDataProtectionPolicy(v string) *CreateTopicInput { + s.DataProtectionPolicy = &v + return s +} + // SetName sets the Name field's value. func (s *CreateTopicInput) SetName(v string) *CreateTopicInput { s.Name = &v @@ -5647,6 +5847,86 @@ func (s *Endpoint) SetEndpointArn(v string) *Endpoint { return s } +type GetDataProtectionPolicyInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic whose DataProtectionPolicy you want to get. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataProtectionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataProtectionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDataProtectionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataProtectionPolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetDataProtectionPolicyInput) SetResourceArn(v string) *GetDataProtectionPolicyInput { + s.ResourceArn = &v + return s +} + +type GetDataProtectionPolicyOutput struct { + _ struct{} `type:"structure"` + + // Retrieves the DataProtectionPolicy in JSON string format. + DataProtectionPolicy *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataProtectionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
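CreateTopicInput gains a DataProtectionPolicy field (with SetDataProtectionPolicy) and the GetDataProtectionPolicy types in this hunk. A hedged sketch that combines the new setter with a follow-up read, assuming the companion GetDataProtectionPolicy operation added in the same SDK update and a placeholder policy document.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	client := sns.New(session.Must(session.NewSession()))

    	// Placeholder policy document; the real schema is defined by SNS message data protection.
    	policyJSON := `{"Name":"example-policy","Version":"2021-06-01","Statement":[]}`

    	// Create a topic with an inline data protection policy using the new setter.
    	topic, err := client.CreateTopic((&sns.CreateTopicInput{}).
    		SetName("example-topic").
    		SetDataProtectionPolicy(policyJSON))
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Read the policy back with the new GetDataProtectionPolicy operation.
    	got, err := client.GetDataProtectionPolicy(&sns.GetDataProtectionPolicyInput{
    		ResourceArn: topic.TopicArn,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(aws.StringValue(got.DataProtectionPolicy))
    }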
+func (s GetDataProtectionPolicyOutput) GoString() string { + return s.String() +} + +// SetDataProtectionPolicy sets the DataProtectionPolicy field's value. +func (s *GetDataProtectionPolicyOutput) SetDataProtectionPolicy(v string) *GetDataProtectionPolicyOutput { + s.DataProtectionPolicy = &v + return s +} + // Input for GetEndpointAttributes action. type GetEndpointAttributesInput struct { _ struct{} `type:"structure"` @@ -7871,6 +8151,95 @@ func (s *PublishOutput) SetSequenceNumber(v string) *PublishOutput { return s } +type PutDataProtectionPolicyInput struct { + _ struct{} `type:"structure"` + + // The JSON serialization of the topic's DataProtectionPolicy. + // + // The DataProtectionPolicy must be in JSON string format. + // + // Length Constraints: Maximum length of 30,720. + // + // DataProtectionPolicy is a required field + DataProtectionPolicy *string `type:"string" required:"true"` + + // The ARN of the topic whose DataProtectionPolicy you want to add or update. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDataProtectionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutDataProtectionPolicyInput"} + if s.DataProtectionPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("DataProtectionPolicy")) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataProtectionPolicy sets the DataProtectionPolicy field's value. +func (s *PutDataProtectionPolicyInput) SetDataProtectionPolicy(v string) *PutDataProtectionPolicyInput { + s.DataProtectionPolicy = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *PutDataProtectionPolicyInput) SetResourceArn(v string) *PutDataProtectionPolicyInput { + s.ResourceArn = &v + return s +} + +type PutDataProtectionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutDataProtectionPolicyOutput) GoString() string { + return s.String() +} + // Input for RemovePermission action. type RemovePermissionInput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go index 714e3b87f6e..046c1261738 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/doc.go @@ -26,7 +26,7 @@ // See sns package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/sns/ // -// Using the Client +// # Using the Client // // To contact Amazon Simple Notification Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go index b56ae0bc941..fdb0bd3e492 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -39,13 +39,14 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a SNS client from just a session. -// svc := sns.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a SNS client with additional configuration -// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a SNS client from just a session. +// svc := sns.New(mySession) +// +// // Create a SNS client with additional configuration +// svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { c := p.ClientConfig(EndpointsID, cfgs...) if c.SigningNameDerived || len(c.SigningName) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go index 023dd91d7ad..b8f590f71d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -29,14 +29,13 @@ const opGetRoleCredentials = "GetRoleCredentials" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) // -// // Example sending a request using the GetRoleCredentialsRequest method. -// req, resp := client.GetRoleCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { @@ -69,20 +68,21 @@ func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *re // API operation GetRoleCredentials for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. 
// -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { @@ -122,14 +122,13 @@ const opListAccountRoles = "ListAccountRoles" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) // -// // Example sending a request using the ListAccountRolesRequest method. -// req, resp := client.ListAccountRolesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { @@ -157,8 +156,7 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques // ListAccountRoles API operation for AWS Single Sign-On. // -// Lists all roles that are assigned to the user for a given Amazon Web Services -// account. +// Lists all roles that are assigned to the user for a given AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -168,20 +166,21 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques // API operation ListAccountRoles for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. 
+// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { @@ -213,15 +212,14 @@ func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRol // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListAccountRoles operation. -// pageNum := 0 -// err := client.ListAccountRolesPages(params, -// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -273,14 +271,13 @@ const opListAccounts = "ListAccounts" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) // -// // Example sending a request using the ListAccountsRequest method. -// req, resp := client.ListAccountsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { @@ -308,10 +305,10 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques // ListAccounts API operation for AWS Single Sign-On. // -// Lists all Amazon Web Services accounts assigned to the user. These Amazon -// Web Services accounts are assigned by the administrator of the account. For -// more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the Amazon Web Services SSO User Guide. This operation returns a paginated +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the IAM Identity Center User Guide. This operation returns a paginated // response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -322,20 +319,21 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques // API operation ListAccounts for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. 
For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { @@ -367,15 +365,14 @@ func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListAccounts operation. -// pageNum := 0 -// err := client.ListAccountsPages(params, -// func(page *sso.ListAccountsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -427,14 +424,13 @@ const opLogout = "Logout" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) // -// // Example sending a request using the LogoutRequest method. -// req, resp := client.LogoutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { @@ -458,20 +454,20 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L // Logout API operation for AWS Single Sign-On. // // Removes the locally stored SSO tokens from the client-side cache and sends -// an API call to the Amazon Web Services SSO service to invalidate the corresponding -// server-side Amazon Web Services SSO sign in session. +// an API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. 
// -// If a user uses Amazon Web Services SSO to access the AWS CLI, the user’s -// Amazon Web Services SSO sign in session is used to obtain an IAM session, -// as specified in the corresponding Amazon Web Services SSO permission set. -// More specifically, Amazon Web Services SSO assumes an IAM role in the target -// account on behalf of the user, and the corresponding temporary Amazon Web -// Services credentials are returned to the client. +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, +// IAM Identity Center assumes an IAM role in the target account on behalf of +// the user, and the corresponding temporary AWS credentials are returned to +// the client. // // After user logout, any existing IAM role sessions that were created by using -// Amazon Web Services SSO permission sets continue based on the duration configured +// IAM Identity Center permission sets continue based on the duration configured // in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the Amazon Web Services SSO User Guide. +// in the IAM Identity Center User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -481,17 +477,18 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L // API operation Logout for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { @@ -515,20 +512,17 @@ func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...req return out, req.Send() } -// Provides information about your Amazon Web Services account. +// Provides information about your AWS account. type AccountInfo struct { _ struct{} `type:"structure"` - // The identifier of the Amazon Web Services account that is assigned to the - // user. + // The identifier of the AWS account that is assigned to the user. AccountId *string `locationName:"accountId" type:"string"` - // The display name of the Amazon Web Services account that is assigned to the - // user. + // The display name of the AWS account that is assigned to the user. 
AccountName *string `locationName:"accountName" type:"string"` - // The email address of the Amazon Web Services account that is assigned to - // the user. + // The email address of the AWS account that is assigned to the user. EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` } @@ -573,7 +567,7 @@ type GetRoleCredentialsInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the Amazon Web Services SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetRoleCredentialsInput's @@ -582,8 +576,7 @@ type GetRoleCredentialsInput struct { // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - // The identifier for the Amazon Web Services account that is assigned to the - // user. + // The identifier for the AWS account that is assigned to the user. // // AccountId is a required field AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` @@ -750,7 +743,7 @@ type ListAccountRolesInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the Amazon Web Services SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListAccountRolesInput's @@ -759,8 +752,7 @@ type ListAccountRolesInput struct { // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` - // The identifier for the Amazon Web Services account that is assigned to the - // user. + // The identifier for the AWS account that is assigned to the user. // // AccountId is a required field AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` @@ -880,7 +872,7 @@ type ListAccountsInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the Amazon Web Services SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListAccountsInput's @@ -995,7 +987,7 @@ type LogoutInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the Amazon Web Services SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by LogoutInput's @@ -1134,18 +1126,17 @@ type RoleCredentials struct { _ struct{} `type:"structure"` // The identifier used for the temporary security credentials. 
For more information, - // see Using Temporary Security Credentials to Request Access to Amazon Web - // Services Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the Amazon Web Services IAM User Guide. + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. AccessKeyId *string `locationName:"accessKeyId" type:"string"` // The date on which temporary security credentials expire. Expiration *int64 `locationName:"expiration" type:"long"` // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to Amazon Web Services Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the Amazon Web Services IAM User Guide. + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. // // SecretAccessKey is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by RoleCredentials's @@ -1153,9 +1144,8 @@ type RoleCredentials struct { SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to Amazon Web Services Resources - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the Amazon Web Services IAM User Guide. + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. // // SessionToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by RoleCredentials's @@ -1209,7 +1199,7 @@ func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { type RoleInfo struct { _ struct{} `type:"structure"` - // The identifier of the Amazon Web Services account assigned to the user. + // The identifier of the AWS account assigned to the user. AccountId *string `locationName:"accountId" type:"string"` // The friendly name of the role that is assigned to the user. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go index 6bb96d263e3..15e61a32282 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go @@ -3,32 +3,31 @@ // Package sso provides the client and types for making API // requests to AWS Single Sign-On. // -// Amazon Web Services Single Sign On Portal is a web service that makes it -// easy for you to assign user access to Amazon Web Services SSO resources such -// as the AWS access portal. Users can get Amazon Web Services account applications -// and roles assigned to them and get federated into the application. +// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web +// service that makes it easy for you to assign user access to IAM Identity +// Center resources such as the AWS access portal. Users can get AWS account +// applications and roles assigned to them and get federated into the application. 
// -// Although Amazon Web Services Single Sign-On was renamed, the sso and identitystore -// API namespaces will continue to retain their original name for backward compatibility -// purposes. For more information, see Amazon Web Services SSO rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). +// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces +// will continue to retain their original name for backward compatibility purposes. +// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). // -// This API reference guide describes the Amazon Web Services SSO Portal operations +// This reference guide describes the IAM Identity Center Portal operations // that you can call programatically and includes detailed information on data // types and errors. // -// Amazon Web Services provides SDKs that consist of libraries and sample code -// for various programming languages and platforms, such as Java, Ruby, .Net, -// iOS, or Android. The SDKs provide a convenient way to create programmatic -// access to Amazon Web Services SSO and other Amazon Web Services services. -// For more information about the Amazon Web Services SDKs, including how to -// download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// AWS provides SDKs that consist of libraries and sample code for various programming +// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs +// provide a convenient way to create programmatic access to IAM Identity Center +// and other AWS services. For more information about the AWS SDKs, including +// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). // // See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service. // // See sso package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/sso/ // -// Using the Client +// # Using the Client // // To contact AWS Single Sign-On with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go index 7a28dc797e9..7094cfe4130 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go @@ -40,13 +40,14 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a SSO client from just a session. -// svc := sso.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a SSO client with additional configuration -// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a SSO client from just a session. +// svc := sso.New(mySession) +// +// // Create a SSO client with additional configuration +// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO { c := p.ClientConfig(EndpointsID, cfgs...) 
if c.SigningNameDerived || len(c.SigningName) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go index 4cac247c188..818cab7cda9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // AWS Single Sign-On. -// func myFunc(svc ssoiface.SSOAPI) bool { -// // Make svc.GetRoleCredentials request -// } +// // myFunc uses an SDK service client to make a request to +// // AWS Single Sign-On. +// func myFunc(svc ssoiface.SSOAPI) bool { +// // Make svc.GetRoleCredentials request +// } // -// func main() { -// sess := session.New() -// svc := sso.New(sess) +// func main() { +// sess := session.New() +// svc := sso.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSSOClient struct { -// ssoiface.SSOAPI -// } -// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockSSOClient struct { +// ssoiface.SSOAPI +// } +// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSSOClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSSOClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index f1a7bfdd47b..2b7e675ab86 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -28,14 +28,13 @@ const opAssumeRole = "AssumeRole" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) // -// // Example sending a request using the AssumeRoleRequest method. 
-// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { @@ -66,7 +65,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRole can be used to make // API calls to any Amazon Web Services service with the following exception: @@ -105,10 +104,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // To allow a user to assume a role in the same account, you can do either of // the following: // -// * Attach a policy to the user that allows the user to call AssumeRole -// (as long as the role's trust policy trusts the account). +// - Attach a policy to the user that allows the user to call AssumeRole +// (as long as the role's trust policy trusts the account). // -// * Add the user as a principal directly in the role's trust policy. +// - Add the user as a principal directly in the role's trust policy. // // You can do either because the role’s trust policy acts as an IAM resource-based // policy. When a resource-based policy grants access to a principal in the @@ -116,7 +115,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // -// Tags +// # Tags // // (Optional) You can pass tag key-value pairs to your session. These tags are // called session tags. For more information about session tags, see Passing @@ -134,7 +133,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// Using MFA with AssumeRole +// # Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information // when you call AssumeRole. This is useful for cross-account scenarios to ensure @@ -163,35 +162,36 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // API operation AssumeRole for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. 
For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { @@ -231,14 +231,13 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleWithSAMLRequest method. 
+// req, resp := client.AssumeRoleWithSAMLRequest(params) // -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { @@ -274,7 +273,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // can use these temporary security credentials to sign calls to Amazon Web // Services services. // -// Session Duration +// # Session Duration // // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. However, you can use the optional DurationSeconds parameter @@ -300,7 +299,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // a role using role chaining and provide a DurationSeconds parameter value // greater than one hour, the operation fails. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any Amazon Web Services service with the following exception: @@ -331,7 +330,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // identifiable information (PII). For example, you could instead use the persistent // identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). // -// Tags +// # Tags // // (Optional) You can configure your IdP to pass attributes into your SAML assertion // as session tags. Each session tag consists of a key name and an associated @@ -365,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// SAML Configuration +// # SAML Configuration // // Before your application can call AssumeRoleWithSAML, you must configure your // SAML identity provider (IdP) to issue the claims required by Amazon Web Services. @@ -376,17 +375,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. // -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. // -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. 
// -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -396,47 +395,48 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // API operation AssumeRoleWithSAML for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. 
An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { @@ -476,14 +476,13 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) // -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { @@ -540,7 +539,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // temporary security credentials to sign calls to Amazon Web Services service // API operations. 
// -// Session Duration +// # Session Duration // // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter @@ -555,7 +554,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any Amazon Web Services service with the following @@ -576,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Tags +// # Tags // // (Optional) You can configure your IdP to pass attributes into your web identity // token as session tags. Each session tag consists of a key name and an associated @@ -610,7 +609,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// Identities +// # Identities // // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that @@ -628,24 +627,24 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to Amazon Web Services. +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. // -// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. 
+// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -655,54 +654,55 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // API operation AssumeRoleWithWebIdentity for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. 
Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { @@ -742,14 +742,13 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) // -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { @@ -793,18 +792,18 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // The decoded message includes the following type of information: // -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. // -// * The principal who made the request. +// - The principal who made the request. // -// * The requested action. +// - The requested action. // -// * The requested resource. +// - The requested resource. // -// * The values of condition keys in the context of the user's request. +// - The values of condition keys in the context of the user's request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -814,10 +813,10 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // API operation DecodeAuthorizationMessage for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { @@ -857,14 +856,13 @@ const opGetAccessKeyInfo = "GetAccessKeyInfo" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) // -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { @@ -954,14 +952,13 @@ const opGetCallerIdentity = "GetCallerIdentity" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) // -// // Example sending a request using the GetCallerIdentityRequest method. -// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { @@ -1037,14 +1034,13 @@ const opGetFederationToken = "GetFederationToken" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) // -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { @@ -1094,7 +1090,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // -// Session duration +// # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). 
The default @@ -1102,15 +1098,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // by using the Amazon Web Services account root user credentials have a maximum // duration of 3,600 seconds (1 hour). // -// Permissions +// # Permissions // // You can use the temporary credentials created by GetFederationToken in any // Amazon Web Services service except the following: // -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. // -// * You cannot call any STS operations except GetCallerIdentity. +// - You cannot call any STS operations except GetCallerIdentity. // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -1136,7 +1132,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // by the policy. These permissions are granted in addition to the permissions // granted by the session policies. // -// Tags +// # Tags // // (Optional) You can pass tag key-value pairs to your session. These are called // session tags. For more information about session tags, see Passing Session @@ -1172,31 +1168,32 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // API operation GetFederationToken for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. 
+// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { @@ -1236,14 +1233,13 @@ const opGetSessionToken = "GetSessionToken" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) // -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { @@ -1285,7 +1281,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) // in the IAM User Guide. // -// Session Duration +// # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon // Web Services security credentials of the Amazon Web Services account root @@ -1296,15 +1292,15 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a // default of 1 hour. // -// Permissions +// # Permissions // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any Amazon Web Services service with the following exceptions: // -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. 
+// - You cannot call any IAM API operations unless MFA authentication information +// is included in the request. // -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// - You cannot call any STS API except AssumeRole or GetCallerIdentity. // // We recommend that you do not call GetSessionToken with Amazon Web Services // account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) @@ -1330,13 +1326,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // API operation GetSessionToken for usage and error information. // // Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index 2d98d92353a..c40f5a2a52b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -14,7 +14,7 @@ // See sts package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ // -// Using the Client +// # Using the Client // // To contact AWS Security Token Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index f324ff108a1..12327d05332 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -39,13 +39,14 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a STS client from just a session. -// svc := sts.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) 
if c.SigningNameDerived || len(c.SigningName) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go index e2e1d6efe55..bf06b2e7d08 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } // -// func main() { -// sess := session.New() -// svc := sts.New(sess) +// func main() { +// sess := session.New() +// svc := sts.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index fd2b3a42b2a..087320da7f0 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -13,21 +13,21 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. 
// -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -45,25 +45,25 @@ import ( // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. 
// // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option @@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16cfa..a248e5436d9 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. 
- // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a299731..00000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb5392..1f9ca9c4892 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -336,9 +338,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c7100346323..a0a588502ed 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. 
+ // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +109,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,7 +161,7 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } @@ -282,7 +284,7 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a transformation from the parent type to the current type. type Transform struct{ *transform } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 1ef65ac1db8..2050bf6b46b 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out // For leaf nodes, format the value based on the reflect.Values alone. // As a special case, treat equal []byte as a leaf nodes. 
- isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0)) + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { @@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 287b893588e..2ab41fad3fb 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 68b5c1ae164..23e444f62f3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. 
This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... // 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ffb6..388fcf57120 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 5a3807f978e..4ec00fe7d98 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "math" "path/filepath" "regexp" @@ -153,7 +152,7 @@ type Function struct { // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future. 
func Parse(r io.Reader) (*Profile, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -168,7 +167,7 @@ func ParseData(data []byte) (*Profile, error) { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { - data, err = ioutil.ReadAll(gz) + data, err = io.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 0e643a05b57..129685e3769 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.4.0" + "v2": "2.5.1" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index b42ace44c98..2a05e44572a 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,19 @@ # Changelog +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + ## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 7d0128a0cd5..787a619e373 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -41,6 +41,7 @@ import ( "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // ErrDetails holds the google/rpc/error_details.proto messages. @@ -60,6 +61,30 @@ type ErrDetails struct { Unknown []interface{} } +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. 
+func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + func (e ErrDetails) String() string { var d strings.Builder if e.ErrorInfo != nil { diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 00000000000..e4b03f161d8 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. +func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 00000000000..21678ae65c9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index bf272a5045c..a46ae25b0ee 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.4.0" +const Version = "2.5.1" diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/color.go b/vendor/github.com/googleapis/go-type-adapters/adapters/color.go deleted file mode 100644 index b2b696c707e..00000000000 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/color.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapters - -import ( - "image/color" - "math" - - cpb "google.golang.org/genproto/googleapis/type/color" - wpb "google.golang.org/protobuf/types/known/wrapperspb" -) - -// ProtoColorToRGBA returns an RGBA based on the provided google.type.Color. -// If alpha is not set in the proto, full opacity is assumed. -// -// Note: Converting between a float using [0, 1] to an int using [0, 256) -// causes some cognitive dissonance between accuracy and user expectations. -// For example, most people writing CSS use 0x80 (decimal 128) to mean "half", -// but it is not actually half (it is slightly over). There is actually no -// way to precisely specify the 0.5 float value in a [0, 256) range of -// integers. -// -// This function uses math.Round to address this, meaning that 0.5 will be -// rounded up to 128 rather than rounded down to 127. 
-// -// Because of this fuzziness and precision loss, it is NOT guaranteed that -// ProtoColorToRGBA and RGBAToProtoColor are exact inverses, and both functions -// will lose precision. -func ProtoColorToRGBA(c *cpb.Color) *color.RGBA { - // Determine the appropriate alpha value. - // If alpha is unset, full opacity is the proper default. - alpha := uint8(255) - if c.Alpha != nil { - alpha = uint8(math.Round(float64(c.GetAlpha().GetValue() * 255))) - } - - // Return the RGBA. - return &color.RGBA{ - R: uint8(math.Round(float64(c.GetRed()) * 255)), - G: uint8(math.Round(float64(c.GetGreen()) * 255)), - B: uint8(math.Round(float64(c.GetBlue()) * 255)), - A: alpha, - } -} - -// RGBAToProtoColor returns a google.type.Color based on the provided RGBA. -// -// Note: Converting between ints using [0, 256) and a float using [0, 1] -// causes some cognitive dissonance between accuracy and user expectations. -// For example, most people using CSS use 0x80 (decimal 128) to mean "half", -// but it is not actually half (it is slightly over). These is actually no -// way to precisely specify the 0.5 float value in a [0, 256) range of -// integers. -// -// This function addresses this by limiting decimal precision to 0.01, on -// the rationale that most precision beyond this point is probably -// unintentional. -// -// Because of this fuzziness and precision loss, it is NOT guaranteed that -// ProtoColorToRGBA and RGBAToProtoColor are exact inverses, and both functions -// will lose precision. -func RGBAToProtoColor(rgba *color.RGBA) *cpb.Color { - return &cpb.Color{ - Red: float32(int(rgba.R)*100/255) / 100, - Green: float32(int(rgba.G)*100/255) / 100, - Blue: float32(int(rgba.B)*100/255) / 100, - Alpha: &wpb.FloatValue{Value: float32(int(rgba.A)*100/255) / 100}, - } -} diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/date.go b/vendor/github.com/googleapis/go-type-adapters/adapters/date.go deleted file mode 100644 index bea5316112a..00000000000 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/date.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapters - -import ( - "time" - - dpb "google.golang.org/genproto/googleapis/type/date" -) - -// ProtoDateToLocalTime returns a new Time based on the google.type.Date, in -// the system's time zone. -// -// Hours, minues, seconds, and nanoseconds are set to 0. -func ProtoDateToLocalTime(d *dpb.Date) time.Time { - return ProtoDateToTime(d, time.Local) -} - -// ProtoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. -// -// Hours, minutes, seconds, and nanoseconds are set to 0. -func ProtoDateToUTCTime(d *dpb.Date) time.Time { - return ProtoDateToTime(d, time.UTC) -} - -// ProtoDateToTime returns a new Time based on the google.type.Date and provided -// *time.Location. -// -// Hours, minutes, seconds, and nanoseconds are set to 0. 
-func ProtoDateToTime(d *dpb.Date, l *time.Location) time.Time { - return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) -} - -// TimeToProtoDate returns a new google.type.Date based on the provided time.Time. -// The location is ignored, as is anything more precise than the day. -func TimeToProtoDate(t time.Time) *dpb.Date { - return &dpb.Date{ - Year: int32(t.Year()), - Month: int32(t.Month()), - Day: int32(t.Day()), - } -} diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/datetime.go b/vendor/github.com/googleapis/go-type-adapters/adapters/datetime.go deleted file mode 100644 index 77f1dfac2bb..00000000000 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/datetime.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapters - -import ( - "fmt" - "regexp" - "strconv" - "time" - - dtpb "google.golang.org/genproto/googleapis/type/datetime" - durpb "google.golang.org/protobuf/types/known/durationpb" -) - -// ProtoDateTimeToTime returns a new Time based on the google.type.DateTime. -// -// It errors if it gets invalid time zone information. -func ProtoDateTimeToTime(d *dtpb.DateTime) (time.Time, error) { - var err error - - // Determine the location. - loc := time.UTC - if tz := d.GetTimeZone(); tz != nil { - loc, err = time.LoadLocation(tz.GetId()) - if err != nil { - return time.Time{}, err - } - } - if offset := d.GetUtcOffset(); offset != nil { - hours := int(offset.GetSeconds()) / 3600 - loc = time.FixedZone(fmt.Sprintf("UTC%+d", hours), hours) - } - - // Return the Time. - return time.Date( - int(d.GetYear()), - time.Month(d.GetMonth()), - int(d.GetDay()), - int(d.GetHours()), - int(d.GetMinutes()), - int(d.GetSeconds()), - int(d.GetNanos()), - loc, - ), nil -} - -// TimeToProtoDateTime returns a new google.type.DateTime based on the -// provided time.Time. -// -// It errors if it gets invalid time zone information. -func TimeToProtoDateTime(t time.Time) (*dtpb.DateTime, error) { - dt := &dtpb.DateTime{ - Year: int32(t.Year()), - Month: int32(t.Month()), - Day: int32(t.Day()), - Hours: int32(t.Hour()), - Minutes: int32(t.Minute()), - Seconds: int32(t.Second()), - Nanos: int32(t.Nanosecond()), - } - - // If the location is a UTC offset, encode it as such in the proto. 
- loc := t.Location().String() - if match := offsetRegexp.FindStringSubmatch(loc); len(match) > 0 { - offsetInt, err := strconv.Atoi(match[1]) - if err != nil { - return nil, err - } - dt.TimeOffset = &dtpb.DateTime_UtcOffset{ - UtcOffset: &durpb.Duration{Seconds: int64(offsetInt) * 3600}, - } - } else if loc != "" { - dt.TimeOffset = &dtpb.DateTime_TimeZone{ - TimeZone: &dtpb.TimeZone{Id: loc}, - } - } - - return dt, nil -} - -var offsetRegexp = regexp.MustCompile(`^UTC([+-][\d]{1,2})$`) diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/decimal.go b/vendor/github.com/googleapis/go-type-adapters/adapters/decimal.go deleted file mode 100644 index 1064acfb705..00000000000 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/decimal.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapters - -import ( - "fmt" - "math" - "math/big" - "regexp" - "strings" - - dpb "google.golang.org/genproto/googleapis/type/decimal" -) - -// ProtoDecimalToFloat converts the provided google.type.Decimal to a big.Float. -func ProtoDecimalToFloat(d *dpb.Decimal) (*big.Float, error) { - value := strings.ToLower(d.GetValue()) - - // Determine the required precision. - v := value - if strings.ContainsRune(v, 'e') { - v = v[0:strings.IndexRune(v, 'e')] - } - v = nan.ReplaceAllLiteralString(v, "") - prec := uint(math.Pow(2, float64(len(v)+1))) - - // Parse and return a big.Float. - f, _, err := big.ParseFloat(value, 10, prec, big.AwayFromZero) - return f, err -} - -// ProtoDecimalToFloat64 converts the provided google.type.Decimal to a float64. -func ProtoDecimalToFloat64(d *dpb.Decimal) (float64, big.Accuracy, error) { - f, err := ProtoDecimalToFloat(d) - if err != nil { - return 0.0, big.Exact, err - } - f64, accuracy := f.Float64() - return f64, accuracy, nil -} - -// Float64ToProtoDecimal converts the provided float64 to a google.type.Decimal. -func Float64ToProtoDecimal(f float64) *dpb.Decimal { - return &dpb.Decimal{ - Value: fmt.Sprintf("%f", f), - } -} - -// FloatToProtoDecimal converts the provided big.Float to a google.type.Decimal. -func FloatToProtoDecimal(f *big.Float) *dpb.Decimal { - return &dpb.Decimal{ - Value: f.String(), - } -} - -var nan = regexp.MustCompile(`[^\d]`) diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/fraction.go b/vendor/github.com/googleapis/go-type-adapters/adapters/fraction.go deleted file mode 100644 index bc5bb46addd..00000000000 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/fraction.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adapters - -import ( - "math/big" - - fpb "google.golang.org/genproto/googleapis/type/fraction" -) - -// ProtoFractionToRat returns a math/big Rat (rational number) based on the given -// google.type.fraction. -func ProtoFractionToRat(f *fpb.Fraction) *big.Rat { - return big.NewRat(f.GetNumerator(), f.GetDenominator()) -} - -// RatToProtoFraction returns a google.type.Fraction from a math/big Rat. -func RatToProtoFraction(r *big.Rat) *fpb.Fraction { - return &fpb.Fraction{ - Numerator: r.Num().Int64(), - Denominator: r.Denom().Int64(), - } -} diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md index 756e60dcfdb..ff6e7fccd1b 100644 --- a/vendor/github.com/grafana/regexp/README.md +++ b/vendor/github.com/grafana/regexp/README.md @@ -10,3 +10,10 @@ The `main` branch is non-optimised: switch over to [`speedup`](https://github.co ## Benchmarks: ![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png) + +## Links to upstream changes: +* [regexp: allow patterns with no alternates to be one-pass](https://go-review.googlesource.com/c/go/+/353711) +* [regexp: speed up onepass prefix check](https://go-review.googlesource.com/c/go/+/354909) +* [regexp: handle prefix string with fold-case](https://go-review.googlesource.com/c/go/+/358756) +* [regexp: avoid copying each instruction executed](https://go-review.googlesource.com/c/go/+/355789) +* [regexp: allow prefix string anchored at beginning](https://go-review.googlesource.com/c/go/+/377294) diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go index 41ae59bcaa2..a8111a7eb8e 100644 --- a/vendor/github.com/grafana/regexp/backtrack.go +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -15,8 +15,9 @@ package regexp import ( - "regexp/syntax" "sync" + + "github.com/grafana/regexp/syntax" ) // A job is an entry on the backtracker's job stack. It holds @@ -163,7 +164,7 @@ func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { } Skip: - inst := re.prog.Inst[pc] + inst := &re.prog.Inst[pc] switch inst.Op { default: @@ -336,17 +337,7 @@ func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []in // but we are not clearing visited between calls to TrySearch, // so no work is duplicated and it ends up still being linear. width := -1 - for ; pos <= end && width != 0; pos += width { - if len(re.prefix) > 0 { - // Match requires literal prefix; fast search for it. - advance := i.index(re, pos) - if advance < 0 { - freeBitState(b) - return nil - } - pos += advance - } - + for pos <= end && width != 0 { if len(b.cap) > 0 { b.cap[0] = pos } @@ -355,6 +346,17 @@ func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []in goto Match } _, width = i.step(pos) + pos += width + + if len(re.prefix) > 0 { + // Match requires literal prefix; fast search for next occurrence. 
+ advance := i.index(re, pos) + if advance < 0 { + freeBitState(b) + return nil + } + pos += advance + } } freeBitState(b) return nil diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go index 4411e4c3e61..11a5eebfe25 100644 --- a/vendor/github.com/grafana/regexp/exec.go +++ b/vendor/github.com/grafana/regexp/exec.go @@ -6,8 +6,9 @@ package regexp import ( "io" - "regexp/syntax" "sync" + + "github.com/grafana/regexp/syntax" ) // A queue is a 'sparse array' holding pending threads of execution. @@ -204,6 +205,8 @@ func (m *machine) match(i input, pos int) bool { // Have match; finished exploring alternatives. break } + // Note we don't check foldCase here, because Unicode folding is complicated; + // just let it fall through to EqualFold on the whole string. if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { // Match requires literal prefix; fast search for it. advance := i.index(m.re, pos) @@ -401,48 +404,49 @@ func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap in } m := newOnePassMachine() + matched := false + i, _ := m.inputs.init(ir, ib, is) + + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + var flag lazyFlag + var pc int + var inst *onePassInst + + // If there is a simple literal prefix, skip over it. + if pos == 0 && len(re.prefix) > 0 && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + if !i.hasPrefix(re) { + goto Return + } + pos += len(re.prefix) + pc = int(re.prefixEnd) + } else { + pc = re.onepass.Start + } + if cap(m.matchcap) < ncap { m.matchcap = make([]int, ncap) } else { m.matchcap = m.matchcap[:ncap] } - matched := false for i := range m.matchcap { m.matchcap[i] = -1 } - i, _ := m.inputs.init(ir, ib, is) - - r, r1 := endOfText, endOfText - width, width1 := 0, 0 r, width = i.step(pos) - if r != endOfText { - r1, width1 = i.step(pos + width) - } - var flag lazyFlag if pos == 0 { flag = newLazyFlag(-1, r) } else { flag = i.context(pos) } - pc := re.onepass.Start - inst := re.onepass.Inst[pc] // If there is a simple literal prefix, skip over it. - if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && - len(re.prefix) > 0 && i.canCheckPrefix() { - // Match requires literal prefix; fast search for it. 
- if !i.hasPrefix(re) { - goto Return - } - pos += len(re.prefix) - r, width = i.step(pos) + if r != endOfText { r1, width1 = i.step(pos + width) - flag = i.context(pos) - pc = int(re.prefixEnd) } for { - inst = re.onepass.Inst[pc] + inst = &re.onepass.Inst[pc] pc = int(inst.Out) switch inst.Op { default: @@ -470,7 +474,7 @@ func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap in } // peek at the input rune to see which branch of the Alt to take case syntax.InstAlt, syntax.InstAltMatch: - pc = int(onePassNext(&inst, r)) + pc = int(onePassNext(inst, r)) continue case syntax.InstFail: goto Return @@ -528,6 +532,29 @@ func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap i return nil } + // Check prefix match before allocating data structures + if len(re.prefix) > 0 && r == nil { + if re.cond&syntax.EmptyBeginText != 0 { // anchored + if b != nil && !(&inputBytes{str: b[pos:]}).hasPrefix(re) { + return nil + } + if s != "" && !(&inputString{str: s[pos:]}).hasPrefix(re) { + return nil + } + } else { // non-anchored + var advance int + if b != nil { + advance = (&inputBytes{str: b}).index(re, pos) + } else { + advance = (&inputString{str: s}).index(re, pos) + } + if advance < 0 { + return nil + } + pos += advance + } + } + if re.onepass != nil { return re.doOnePass(r, b, s, pos, ncap, dstCap) } diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go index 2f3ce6f9f6c..d3613bceaf7 100644 --- a/vendor/github.com/grafana/regexp/onepass.go +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -5,10 +5,12 @@ package regexp import ( - "regexp/syntax" "sort" "strings" "unicode" + "unicode/utf8" + + "github.com/grafana/regexp/syntax" ) // "One-pass" regexp execution. @@ -37,10 +39,10 @@ type onePassInst struct { // is the entire match. Pc is the index of the last rune instruction // in the string. The OnePassPrefix skips over the mandatory // EmptyBeginText -func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { +func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, foldCase bool, pc uint32) { i := &p.Inst[p.Start] if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { - return "", i.Op == syntax.InstMatch, uint32(p.Start) + return "", i.Op == syntax.InstMatch, false, uint32(p.Start) } pc = i.Out i = &p.Inst[pc] @@ -50,12 +52,13 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { } // Avoid allocation of buffer if prefix is empty. if iop(i) != syntax.InstRune || len(i.Rune) != 1 { - return "", i.Op == syntax.InstMatch, uint32(p.Start) + return "", i.Op == syntax.InstMatch, false, uint32(p.Start) } + foldCase = (syntax.Flags(i.Arg)&syntax.FoldCase != 0) // Have prefix; gather characters. var buf strings.Builder - for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 { + for iop(i) == syntax.InstRune && len(i.Rune) == 1 && (syntax.Flags(i.Arg)&syntax.FoldCase != 0) == foldCase && i.Rune[0] != utf8.RuneError { buf.WriteRune(i.Rune[0]) pc, i = i.Out, &p.Inst[i.Out] } @@ -64,7 +67,7 @@ func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { p.Inst[i.Out].Op == syntax.InstMatch { complete = true } - return buf.String(), complete, pc + return buf.String(), complete, foldCase, pc } // OnePassNext selects the next actionable state of the prog, based on the input character. 
@@ -471,12 +474,20 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) { syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { return nil } - // every instruction leading to InstMatch must be EmptyEndText + hasAlt := false + for _, inst := range prog.Inst { + if inst.Op == syntax.InstAlt || inst.Op == syntax.InstAltMatch { + hasAlt = true + break + } + } + // If we have alternates, every instruction leading to InstMatch must be EmptyEndText. + // Also, any match on empty text must be $. for _, inst := range prog.Inst { opOut := prog.Inst[inst.Out].Op switch inst.Op { default: - if opOut == syntax.InstMatch { + if opOut == syntax.InstMatch && hasAlt { return nil } case syntax.InstAlt, syntax.InstAltMatch: diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go index b547a2ab97d..1479e373174 100644 --- a/vendor/github.com/grafana/regexp/regexp.go +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -9,17 +9,22 @@ // More precisely, it is the syntax accepted by RE2 and described at // https://golang.org/s/re2syntax, except for \C. // For an overview of the syntax, run -// go doc regexp/syntax +// +// go doc regexp/syntax // // The regexp implementation provided by this package is // guaranteed to run in time linear in the size of the input. // (This is a property not guaranteed by most open source // implementations of regular expressions.) For more information // about this property, see +// // https://swtch.com/~rsc/regexp/regexp1.html +// // or any book about automata theory. // // All characters are UTF-8-encoded code points. +// Following utf8.DecodeRune, each byte of an invalid UTF-8 sequence +// is treated as if it encoded utf8.RuneError (U+FFFD). // // There are 16 methods of Regexp that match a regular expression and identify // the matched text. Their names are matched by this regular expression: @@ -40,11 +45,11 @@ // successive submatches of the expression. Submatches are matches of // parenthesized subexpressions (also known as capturing groups) within the // regular expression, numbered from left to right in order of opening -// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 +// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is // the match of the first parenthesized subexpression, and so on. // // If 'Index' is present, matches and submatches are identified by byte index -// pairs within the input string: result[2*n:2*n+1] identifies the indexes of +// pairs within the input string: result[2*n:2*n+2] identifies the indexes of // the nth submatch. The pair for n==0 identifies the match of the entire // expression. If 'Index' is not present, the match is identified by the text // of the match/submatch. If an index is negative or text is nil, it means that @@ -62,18 +67,18 @@ // before returning. // // (There are a few other methods that do not match this pattern.) -// package regexp import ( "bytes" "io" - "regexp/syntax" "strconv" "strings" "sync" "unicode" "unicode/utf8" + + "github.com/grafana/regexp/syntax" ) // Regexp is the representation of a compiled regular expression. 
@@ -90,6 +95,7 @@ type Regexp struct { prefixBytes []byte // prefix, as a []byte prefixRune rune // first rune in prefix prefixEnd uint32 // pc for last rune in prefix + prefixFoldCase bool // check prefix case-insensitive mpool int // pool for machines matchcap int // size of recorded match lengths prefixComplete bool // prefix is the entire regexp @@ -195,10 +201,10 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { minInputLen: minInputLen(re), } if regexp.onepass == nil { - regexp.prefix, regexp.prefixComplete = prog.Prefix() + regexp.prefix, regexp.prefixComplete, regexp.prefixFoldCase = prog.PrefixAndCase() regexp.maxBitStateLen = maxBitStateLen(prog) } else { - regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) + regexp.prefix, regexp.prefixComplete, regexp.prefixFoldCase, regexp.prefixEnd = onePassPrefix(prog) } if regexp.prefix != "" { // TODO(rsc): Remove this allocation by adding @@ -276,7 +282,11 @@ func minInputLen(re *syntax.Regexp) int { case syntax.OpLiteral: l := 0 for _, r := range re.Rune { - l += utf8.RuneLen(r) + if r == utf8.RuneError { + l++ + } else { + l += utf8.RuneLen(r) + } } return l case syntax.OpCapture, syntax.OpPlus: @@ -396,10 +406,26 @@ func (i *inputString) canCheckPrefix() bool { } func (i *inputString) hasPrefix(re *Regexp) bool { + if re.prefixFoldCase { + n := len(re.prefix) + return len(i.str) >= n && strings.EqualFold(i.str[0:n], re.prefix) + } return strings.HasPrefix(i.str, re.prefix) } func (i *inputString) index(re *Regexp, pos int) int { + if re.prefixFoldCase { + n := len(re.prefix) + // Brute-force compare at every position; could replace with sophisticated algo like strings.Index + for p := pos; p+n <= len(i.str); { + if strings.EqualFold(i.str[p:p+n], re.prefix) { + return p - pos + } + _, w := i.step(p) + p += w + } + return -1 + } return strings.Index(i.str[pos:], re.prefix) } @@ -443,10 +469,26 @@ func (i *inputBytes) canCheckPrefix() bool { } func (i *inputBytes) hasPrefix(re *Regexp) bool { + if re.prefixFoldCase { + n := len(re.prefixBytes) + return len(i.str) >= n && bytes.EqualFold(i.str[0:n], re.prefixBytes) + } return bytes.HasPrefix(i.str, re.prefixBytes) } func (i *inputBytes) index(re *Regexp, pos int) int { + if re.prefixFoldCase { + n := len(re.prefixBytes) + // Brute-force compare at every position; could replace with sophisticated algo like bytes.Index + for p := pos; p+n <= len(i.str); { + if bytes.EqualFold(i.str[p:p+n], re.prefixBytes) { + return p - pos + } + _, w := i.step(p) + p += w + } + return -1 + } return bytes.Index(i.str[pos:], re.prefixBytes) } @@ -787,11 +829,12 @@ func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) { accept = false } var width int - // TODO: use step() if b == nil { - _, width = utf8.DecodeRuneInString(s[pos:end]) + is := inputString{str: s} + _, width = is.step(pos) } else { - _, width = utf8.DecodeRune(b[pos:end]) + ib := inputBytes{str: b} + _, width = ib.step(pos) } if width > 0 { pos += width @@ -922,23 +965,22 @@ func (re *Regexp) ExpandString(dst []byte, template string, src string, match [] func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte { for len(template) > 0 { - i := strings.Index(template, "$") - if i < 0 { + before, after, ok := strings.Cut(template, "$") + if !ok { break } - dst = append(dst, template[:i]...) - template = template[i:] - if len(template) > 1 && template[1] == '$' { + dst = append(dst, before...) 
+ template = after + if template != "" && template[0] == '$' { // Treat $$ as $. dst = append(dst, '$') - template = template[2:] + template = template[1:] continue } name, num, rest, ok := extract(template) if !ok { // Malformed; treat $ as raw text. dst = append(dst, '$') - template = template[1:] continue } template = rest @@ -967,17 +1009,16 @@ func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, m return dst } -// extract returns the name from a leading "$name" or "${name}" in str. +// extract returns the name from a leading "name" or "{name}" in str. +// (The $ has already been removed by the caller.) // If it is a number, extract returns num set to that number; otherwise num = -1. func extract(str string) (name string, num int, rest string, ok bool) { - if len(str) < 2 || str[0] != '$' { + if str == "" { return } brace := false - if str[1] == '{' { + if str[0] == '{' { brace = true - str = str[2:] - } else { str = str[1:] } i := 0 @@ -1234,13 +1275,15 @@ func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { // that contains no metacharacters, it is equivalent to strings.SplitN. // // Example: -// s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5) -// // s: ["", "b", "b", "c", "cadaaae"] +// +// s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5) +// // s: ["", "b", "b", "c", "cadaaae"] // // The count determines the number of substrings to return: -// n > 0: at most n substrings; the last substring will be the unsplit remainder. -// n == 0: the result is nil (zero substrings) -// n < 0: all substrings +// +// n > 0: at most n substrings; the last substring will be the unsplit remainder. +// n == 0: the result is nil (zero substrings) +// n < 0: all substrings func (re *Regexp) Split(s string, n int) []string { if n == 0 { diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go index b3f9136b5f3..f6a4b43f7ae 100644 --- a/vendor/github.com/grafana/regexp/syntax/doc.go +++ b/vendor/github.com/grafana/regexp/syntax/doc.go @@ -9,123 +9,132 @@ Package syntax parses regular expressions into parse trees and compiles parse trees into programs. Most clients of regular expressions will use the facilities of package regexp (such as Compile and Match) instead of this package. -Syntax +# Syntax The regular expression syntax understood by this package when parsing with the Perl flag is as follows. Parts of the syntax can be disabled by passing alternate flags to Parse. - Single characters: - . any character, possibly including newline (flag s=true) - [xyz] character class - [^xyz] negated character class - \d Perl character class - \D negated Perl character class - [[:alpha:]] ASCII character class - [[:^alpha:]] negated ASCII character class - \pN Unicode character class (one-letter name) - \p{Greek} Unicode character class - \PN negated Unicode character class (one-letter name) - \P{Greek} negated Unicode character class + + . 
any character, possibly including newline (flag s=true) + [xyz] character class + [^xyz] negated character class + \d Perl character class + \D negated Perl character class + [[:alpha:]] ASCII character class + [[:^alpha:]] negated ASCII character class + \pN Unicode character class (one-letter name) + \p{Greek} Unicode character class + \PN negated Unicode character class (one-letter name) + \P{Greek} negated Unicode character class Composites: - xy x followed by y - x|y x or y (prefer x) + + xy x followed by y + x|y x or y (prefer x) Repetitions: - x* zero or more x, prefer more - x+ one or more x, prefer more - x? zero or one x, prefer one - x{n,m} n or n+1 or ... or m x, prefer more - x{n,} n or more x, prefer more - x{n} exactly n x - x*? zero or more x, prefer fewer - x+? one or more x, prefer fewer - x?? zero or one x, prefer zero - x{n,m}? n or n+1 or ... or m x, prefer fewer - x{n,}? n or more x, prefer fewer - x{n}? exactly n x + + x* zero or more x, prefer more + x+ one or more x, prefer more + x? zero or one x, prefer one + x{n,m} n or n+1 or ... or m x, prefer more + x{n,} n or more x, prefer more + x{n} exactly n x + x*? zero or more x, prefer fewer + x+? one or more x, prefer fewer + x?? zero or one x, prefer zero + x{n,m}? n or n+1 or ... or m x, prefer fewer + x{n,}? n or more x, prefer fewer + x{n}? exactly n x Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} reject forms that create a minimum or maximum repetition count above 1000. Unlimited repetitions are not subject to this restriction. Grouping: - (re) numbered capturing group (submatch) - (?Pre) named & numbered capturing group (submatch) - (?:re) non-capturing group - (?flags) set flags within current group; non-capturing - (?flags:re) set flags during re; non-capturing - Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: + (re) numbered capturing group (submatch) + (?Pre) named & numbered capturing group (submatch) + (?:re) non-capturing group + (?flags) set flags within current group; non-capturing + (?flags:re) set flags during re; non-capturing + + Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: - i case-insensitive (default false) - m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) - s let . match \n (default false) - U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) + i case-insensitive (default false) + m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) + s let . 
match \n (default false) + U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) Empty strings: - ^ at beginning of text or line (flag m=true) - $ at end of text (like \z not \Z) or line (flag m=true) - \A at beginning of text - \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) - \B not at ASCII word boundary - \z at end of text + + ^ at beginning of text or line (flag m=true) + $ at end of text (like \z not \Z) or line (flag m=true) + \A at beginning of text + \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) + \B not at ASCII word boundary + \z at end of text Escape sequences: - \a bell (== \007) - \f form feed (== \014) - \t horizontal tab (== \011) - \n newline (== \012) - \r carriage return (== \015) - \v vertical tab character (== \013) - \* literal *, for any punctuation character * - \123 octal character code (up to three digits) - \x7F hex character code (exactly two digits) - \x{10FFFF} hex character code - \Q...\E literal text ... even if ... has punctuation + + \a bell (== \007) + \f form feed (== \014) + \t horizontal tab (== \011) + \n newline (== \012) + \r carriage return (== \015) + \v vertical tab character (== \013) + \* literal *, for any punctuation character * + \123 octal character code (up to three digits) + \x7F hex character code (exactly two digits) + \x{10FFFF} hex character code + \Q...\E literal text ... even if ... has punctuation Character class elements: - x single character - A-Z character range (inclusive) - \d Perl character class - [:foo:] ASCII character class foo - \p{Foo} Unicode character class Foo - \pF Unicode character class F (one-letter name) + + x single character + A-Z character range (inclusive) + \d Perl character class + [:foo:] ASCII character class foo + \p{Foo} Unicode character class Foo + \pF Unicode character class F (one-letter name) Named character classes as character class elements: - [\d] digits (== \d) - [^\d] not digits (== \D) - [\D] not digits (== \D) - [^\D] not not digits (== \d) - [[:name:]] named ASCII class inside character class (== [:name:]) - [^[:name:]] named ASCII class inside negated character class (== [:^name:]) - [\p{Name}] named Unicode property inside character class (== \p{Name}) - [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) + + [\d] digits (== \d) + [^\d] not digits (== \D) + [\D] not digits (== \D) + [^\D] not not digits (== \d) + [[:name:]] named ASCII class inside character class (== [:name:]) + [^[:name:]] named ASCII class inside negated character class (== [:^name:]) + [\p{Name}] named Unicode property inside character class (== \p{Name}) + [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) Perl character classes (all ASCII-only): - \d digits (== [0-9]) - \D not digits (== [^0-9]) - \s whitespace (== [\t\n\f\r ]) - \S not whitespace (== [^\t\n\f\r ]) - \w word characters (== [0-9A-Za-z_]) - \W not word characters (== [^0-9A-Za-z_]) + + \d digits (== [0-9]) + \D not digits (== [^0-9]) + \s whitespace (== [\t\n\f\r ]) + \S not whitespace (== [^\t\n\f\r ]) + \w word characters (== [0-9A-Za-z_]) + \W not word characters (== [^0-9A-Za-z_]) ASCII character classes: - [[:alnum:]] alphanumeric (== [0-9A-Za-z]) - [[:alpha:]] alphabetic (== [A-Za-z]) - [[:ascii:]] ASCII (== [\x00-\x7F]) - [[:blank:]] blank (== [\t ]) - [[:cntrl:]] control (== [\x00-\x1F\x7F]) - [[:digit:]] digits (== [0-9]) - [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) - 
[[:lower:]] lower case (== [a-z]) - [[:print:]] printable (== [ -~] == [ [:graph:]]) - [[:punct:]] punctuation (== [!-/:-@[-`{-~]) - [[:space:]] whitespace (== [\t\n\v\f\r ]) - [[:upper:]] upper case (== [A-Z]) - [[:word:]] word characters (== [0-9A-Za-z_]) - [[:xdigit:]] hex digit (== [0-9A-Fa-f]) + + [[:alnum:]] alphanumeric (== [0-9A-Za-z]) + [[:alpha:]] alphabetic (== [A-Za-z]) + [[:ascii:]] ASCII (== [\x00-\x7F]) + [[:blank:]] blank (== [\t ]) + [[:cntrl:]] control (== [\x00-\x1F\x7F]) + [[:digit:]] digits (== [0-9]) + [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) + [[:lower:]] lower case (== [a-z]) + [[:print:]] printable (== [ -~] == [ [:graph:]]) + [[:punct:]] punctuation (== [!-/:-@[-`{-~]) + [[:space:]] whitespace (== [\t\n\v\f\r ]) + [[:upper:]] upper case (== [A-Z]) + [[:word:]] word characters (== [0-9A-Za-z_]) + [[:xdigit:]] hex digit (== [0-9A-Fa-f]) Unicode character classes are those in unicode.Categories and unicode.Scripts. */ diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go index d7cf2afa5e9..b6d348d00cd 100644 --- a/vendor/github.com/grafana/regexp/syntax/parse.go +++ b/vendor/github.com/grafana/regexp/syntax/parse.go @@ -43,6 +43,7 @@ const ( ErrMissingRepeatArgument ErrorCode = "missing argument to repetition operator" ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression" ErrUnexpectedParen ErrorCode = "unexpected )" + ErrNestingDepth ErrorCode = "expression nests too deeply" ) func (e ErrorCode) String() string { @@ -90,15 +91,49 @@ const ( // until we've allocated at least maxHeight Regexp structures. const maxHeight = 1000 +// maxSize is the maximum size of a compiled regexp in Insts. +// It too is somewhat arbitrarily chosen, but the idea is to be large enough +// to allow significant regexps while at the same time small enough that +// the compiled form will not take up too much memory. +// 128 MB is enough for a 3.3 million Inst structures, which roughly +// corresponds to a 3.3 MB regexp. +const ( + maxSize = 128 << 20 / instSize + instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words +) + +// maxRunes is the maximum number of runes allowed in a regexp tree +// counting the runes in all the nodes. +// Ignoring character classes p.numRunes is always less than the length of the regexp. +// Character classes can make it much larger: each \pL adds 1292 runes. +// 128 MB is enough for 32M runes, which is over 26k \pL instances. +// Note that repetitions do not make copies of the rune slices, +// so \pL{1000} is only one rune slice, not 1000. +// We could keep a cache of character classes we've seen, +// so that all the \pL we see use the same rune list, +// but that doesn't remove the problem entirely: +// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()]. +// And because the Rune slice is exposed directly in the Regexp, +// there is not an opportunity to change the representation to allow +// partial sharing between different character classes. +// So the limit is the best we can do. 
+const ( + maxRunes = 128 << 20 / runeSize + runeSize = 4 // rune is int32 +) + type parser struct { flags Flags // parse mode flags stack []*Regexp // stack of parsed expressions free *Regexp numCap int // number of capturing groups seen wholeRegexp string - tmpClass []rune // temporary char class work space - numRegexp int // number of regexps allocated - height map[*Regexp]int // regexp height for height limit check + tmpClass []rune // temporary char class work space + numRegexp int // number of regexps allocated + numRunes int // number of runes in char classes + repeats int64 // product of all repetitions seen + height map[*Regexp]int // regexp height, for height limit check + size map[*Regexp]int64 // regexp compiled size, for size limit check } func (p *parser) newRegexp(op Op) *Regexp { @@ -122,6 +157,104 @@ func (p *parser) reuse(re *Regexp) { p.free = re } +func (p *parser) checkLimits(re *Regexp) { + if p.numRunes > maxRunes { + panic(ErrInternalError) + } + p.checkSize(re) + p.checkHeight(re) +} + +func (p *parser) checkSize(re *Regexp) { + if p.size == nil { + // We haven't started tracking size yet. + // Do a relatively cheap check to see if we need to start. + // Maintain the product of all the repeats we've seen + // and don't track if the total number of regexp nodes + // we've seen times the repeat product is in budget. + if p.repeats == 0 { + p.repeats = 1 + } + if re.Op == OpRepeat { + n := re.Max + if n == -1 { + n = re.Min + } + if n <= 0 { + n = 1 + } + if int64(n) > maxSize/p.repeats { + p.repeats = maxSize + } else { + p.repeats *= int64(n) + } + } + if int64(p.numRegexp) < maxSize/p.repeats { + return + } + + // We need to start tracking size. + // Make the map and belatedly populate it + // with info about everything we've constructed so far. + p.size = make(map[*Regexp]int64) + for _, re := range p.stack { + p.checkSize(re) + } + } + + if p.calcSize(re, true) > maxSize { + panic(ErrInternalError) + } +} + +func (p *parser) calcSize(re *Regexp, force bool) int64 { + if !force { + if size, ok := p.size[re]; ok { + return size + } + } + + var size int64 + switch re.Op { + case OpLiteral: + size = int64(len(re.Rune)) + case OpCapture, OpStar: + // star can be 1+ or 2+; assume 2 pessimistically + size = 2 + p.calcSize(re.Sub[0], false) + case OpPlus, OpQuest: + size = 1 + p.calcSize(re.Sub[0], false) + case OpConcat: + for _, sub := range re.Sub { + size += p.calcSize(sub, false) + } + case OpAlternate: + for _, sub := range re.Sub { + size += p.calcSize(sub, false) + } + if len(re.Sub) > 1 { + size += int64(len(re.Sub)) - 1 + } + case OpRepeat: + sub := p.calcSize(re.Sub[0], false) + if re.Max == -1 { + if re.Min == 0 { + size = 2 + sub // x* + } else { + size = 1 + int64(re.Min)*sub // xxx+ + } + break + } + // x{2,5} = xx(x(x(x)?)?)? + size = int64(re.Max)*sub + int64(re.Max-re.Min) + } + + if size < 1 { + size = 1 + } + p.size[re] = size + return size +} + func (p *parser) checkHeight(re *Regexp) { if p.numRegexp < maxHeight { return @@ -133,7 +266,7 @@ func (p *parser) checkHeight(re *Regexp) { } } if p.calcHeight(re, true) > maxHeight { - panic(ErrInternalError) + panic(ErrNestingDepth) } } @@ -158,6 +291,7 @@ func (p *parser) calcHeight(re *Regexp, force bool) int { // push pushes the regexp re onto the parse stack and returns the regexp. func (p *parser) push(re *Regexp) *Regexp { + p.numRunes += len(re.Rune) if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] { // Single rune. 
if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) { @@ -189,7 +323,7 @@ func (p *parser) push(re *Regexp) *Regexp { } p.stack = append(p.stack, re) - p.checkHeight(re) + p.checkLimits(re) return re } @@ -299,7 +433,7 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) ( re.Sub = re.Sub0[:1] re.Sub[0] = sub p.stack[n-1] = re - p.checkHeight(re) + p.checkLimits(re) if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) { return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]} @@ -444,12 +578,16 @@ func (p *parser) collapse(subs []*Regexp, op Op) *Regexp { // frees (passes to p.reuse) any removed *Regexps. // // For example, -// ABC|ABD|AEF|BCX|BCY +// +// ABC|ABD|AEF|BCX|BCY +// // simplifies by literal prefix extraction to -// A(B(C|D)|EF)|BC(X|Y) +// +// A(B(C|D)|EF)|BC(X|Y) +// // which simplifies by character class introduction to -// A(B[CD]|EF)|BC[XY] // +// A(B[CD]|EF)|BC[XY] func (p *parser) factor(sub []*Regexp) []*Regexp { if len(sub) < 2 { return sub @@ -503,6 +641,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp { for j := start; j < i; j++ { sub[j] = p.removeLeadingString(sub[j], len(str)) + p.checkLimits(sub[j]) } suffix := p.collapse(sub[start:i], OpAlternate) // recurse @@ -560,6 +699,7 @@ func (p *parser) factor(sub []*Regexp) []*Regexp { for j := start; j < i; j++ { reuse := j != start // prefix came from sub[start] sub[j] = p.removeLeadingRegexp(sub[j], reuse) + p.checkLimits(sub[j]) } suffix := p.collapse(sub[start:i], OpAlternate) // recurse @@ -757,8 +897,10 @@ func parse(s string, flags Flags) (_ *Regexp, err error) { panic(r) case nil: // ok - case ErrInternalError: + case ErrInternalError: // too big err = &Error{Code: ErrInternalError, Expr: s} + case ErrNestingDepth: + err = &Error{Code: ErrNestingDepth, Expr: s} } }() @@ -892,13 +1034,7 @@ func parse(s string, flags Flags) (_ *Regexp, err error) { case 'Q': // \Q ... \E: the ... is always literals var lit string - if i := strings.Index(t, `\E`); i < 0 { - lit = t[2:] - t = "" - } else { - lit = t[2:i] - t = t[i+2:] - } + lit, t, _ = strings.Cut(t[2:], `\E`) for lit != "" { c, rest, err := nextRune(lit) if err != nil { diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go index ae7a9a2fe01..2bf1dcaac02 100644 --- a/vendor/github.com/grafana/regexp/syntax/prog.go +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" "unicode" + "unicode/utf8" ) // Compiled program. @@ -101,7 +102,7 @@ func EmptyOpContext(r1, r2 rune) EmptyOp { return op } -// IsWordChar reports whether r is consider a ``word character'' +// IsWordChar reports whether r is considered a “word character” // during the evaluation of the \b and \B zero-width assertions. // These assertions are ASCII-only: the word characters are [A-Za-z0-9_]. func IsWordChar(r rune) bool { @@ -145,20 +146,37 @@ func (i *Inst) op() InstOp { // regexp must start with. Complete is true if the prefix // is the entire match. func (p *Prog) Prefix() (prefix string, complete bool) { - i := p.skipNop(uint32(p.Start)) + prefix, complete, foldCase := p.PrefixAndCase() + if foldCase { + return "", false + } + return prefix, complete +} + +// Prefix returns a literal string that all matches for the +// regexp must start with. Complete is true if the prefix +// is the entire match. FoldCase is true if the string should +// match in upper or lower case. 
+func (p *Prog) PrefixAndCase() (prefix string, complete bool, foldCase bool) { + i := &p.Inst[p.Start] + // Skip any no-op, capturing or begin-text instructions + for i.Op == InstNop || i.Op == InstCapture || (i.Op == InstEmptyWidth && EmptyOp(i.Arg)&EmptyBeginText != 0) { + i = &p.Inst[i.Out] + } // Avoid allocation of buffer if prefix is empty. if i.op() != InstRune || len(i.Rune) != 1 { - return "", i.Op == InstMatch + return "", i.Op == InstMatch, false } // Have prefix; gather characters. var buf strings.Builder - for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 { + foldCase = (Flags(i.Arg)&FoldCase != 0) + for i.op() == InstRune && len(i.Rune) == 1 && (Flags(i.Arg)&FoldCase != 0) == foldCase && i.Rune[0] != utf8.RuneError { buf.WriteRune(i.Rune[0]) i = p.skipNop(i.Out) } - return buf.String(), i.Op == InstMatch + return buf.String(), i.Op == InstMatch, foldCase } // StartCond returns the leading empty-width conditions that must diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index b5d1e6cf81b..9b1b81f5292 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -178,11 +178,17 @@ type serverMetadataKey struct{} // NewServerMetadataContext creates a new context with ServerMetadata func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } return context.WithValue(ctx, serverMetadataKey{}, md) } // ServerMetadataFromContext returns the ServerMetadata in ctx func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) return } diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 80ae325eac5..84a2bdbc653 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -20,6 +20,7 @@ type Node struct { CreateIndex uint64 ModifyIndex uint64 Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` } type ServiceAddress struct { diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index ee55b55ad24..acdb5bfa86a 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -196,6 +196,11 @@ type PassiveHealthCheck struct { // MaxFailures is the count of consecutive failures that results in a host // being removed from the pool. MaxFailures uint32 `alias:"max_failures"` + + // EnforcingConsecutive5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. 
+ EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` } // UpstreamLimits describes the limits that are associated with a specific @@ -218,21 +223,24 @@ type UpstreamLimits struct { } type ServiceConfigEntry struct { - Kind string - Name string - Partition string `json:",omitempty"` - Namespace string `json:",omitempty"` - Protocol string `json:",omitempty"` - Mode ProxyMode `json:",omitempty"` - TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - ExternalSNI string `json:",omitempty" alias:"external_sni"` - UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` - Destination *DestinationConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 + Kind string + Name string + Partition string `json:",omitempty"` + Namespace string `json:",omitempty"` + Protocol string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + ExternalSNI string `json:",omitempty" alias:"external_sni"` + UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` + Destination *DestinationConfig `json:",omitempty"` + MaxInboundConnections int `json:",omitempty" alias:"max_inbound_connections"` + LocalConnectTimeoutMs int `json:",omitempty" alias:"local_connect_timeout_ms"` + LocalRequestTimeoutMs int `json:",omitempty" alias:"local_request_timeout_ms"` + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 } func (s *ServiceConfigEntry) GetKind() string { return s.Kind } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go index e162b5fa600..0827e5816b4 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_exports.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_exports.go @@ -57,7 +57,7 @@ type ServiceConsumer struct { func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } func (e *ExportedServicesConfigEntry) GetName() string { return e.Name } func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name } -func (e *ExportedServicesConfigEntry) GetNamespace() string { return splitDefaultNamespace } +func (e *ExportedServicesConfigEntry) GetNamespace() string { return "" } func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta } func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 2bcb3cb52e9..0886bb12acd 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -45,6 +45,8 @@ type HealthCheck struct { Type string Namespace string `json:",omitempty"` Partition string `json:",omitempty"` + ExposedPort int + PeerName string `json:",omitempty"` Definition HealthCheckDefinition @@ -176,8 +178,7 @@ type HealthChecks []*HealthCheck // attached, this function determines the best representative of the status as // as single 
string using the following heuristic: // -// maintenance > critical > warning > passing -// +// maintenance > critical > warning > passing func (c HealthChecks) AggregatedStatus() string { var passing, warning, critical, maintenance bool for _, check := range c { diff --git a/vendor/github.com/hashicorp/consul/api/oss.go b/vendor/github.com/hashicorp/consul/api/oss.go deleted file mode 100644 index 93d639e6929..00000000000 --- a/vendor/github.com/hashicorp/consul/api/oss.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !consulent -// +build !consulent - -package api - -// The following defaults return "default" in enterprise and "" in OSS. -// This constant is useful when a default value is needed for an -// operation that will reject non-empty values in OSS. -const splitDefaultNamespace = "" -const splitDefaultPartition = "" diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go index 60cd437cb75..7e0518f5805 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -17,6 +17,9 @@ type QueryFailoverOptions struct { Targets []QueryFailoverTarget } +// Deprecated: use QueryFailoverOptions instead. +type QueryDatacenterOptions = QueryFailoverOptions + type QueryFailoverTarget struct { // PeerName specifies a peer to try during failover. PeerName string diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index ce17803f97a..a13f397f81e 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -29,7 +29,7 @@ import ( "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" @@ -501,9 +501,37 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { // StorageConfig configures runtime reloadable configuration options. type StorageConfig struct { + TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"` ExemplarsConfig *ExemplarsConfig `yaml:"exemplars,omitempty"` } +// TSDBConfig configures runtime reloadable configuration options. +type TSDBConfig struct { + // OutOfOrderTimeWindow sets how long back in time an out-of-order sample can be inserted + // into the TSDB. This flag is typically set while unmarshaling the configuration file and translating + // OutOfOrderTimeWindowFlag's duration. The unit of this flag is expected to be the same as any + // other timestamp in the TSDB. + OutOfOrderTimeWindow int64 + + // OutOfOrderTimeWindowFlag holds the parsed duration from the config file. + // During unmarshall, this is converted into milliseconds and stored in OutOfOrderTimeWindow. + // This should not be used directly and must be converted into OutOfOrderTimeWindow. + OutOfOrderTimeWindowFlag model.Duration `yaml:"out_of_order_time_window,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *t = TSDBConfig{} + type plain TSDBConfig + if err := unmarshal((*plain)(t)); err != nil { + return err + } + + t.OutOfOrderTimeWindow = time.Duration(t.OutOfOrderTimeWindowFlag).Milliseconds() + + return nil +} + type TracingClientType string const ( diff --git a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go index a2294d93071..2b11c242abe 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go @@ -40,6 +40,8 @@ const ( dnsSrvRecordPrefix = model.MetaLabelPrefix + "dns_srv_record_" dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target" dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port" + dnsMxRecordPrefix = model.MetaLabelPrefix + "dns_mx_record_" + dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target" // Constants for instrumentation. namespace = "prometheus" @@ -100,7 +102,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } switch strings.ToUpper(c.Type) { case "SRV": - case "A", "AAAA": + case "A", "AAAA", "MX": if c.Port == 0 { return errors.New("a port is required in DNS-SD configs for all record types except SRV") } @@ -136,6 +138,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery { qtype = dns.TypeAAAA case "SRV": qtype = dns.TypeSRV + case "MX": + qtype = dns.TypeMX } d := &Discovery{ names: conf.Names, @@ -195,7 +199,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ } for _, record := range response.Answer { - var target, dnsSrvRecordTarget, dnsSrvRecordPort model.LabelValue + var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue switch addr := record.(type) { case *dns.SRV: @@ -206,6 +210,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ addr.Target = strings.TrimRight(addr.Target, ".") target = hostPort(addr.Target, int(addr.Port)) + case *dns.MX: + dnsMxRecordTarget = model.LabelValue(addr.Mx) + + // Remove the final dot from rooted DNS names to make them look more usual. + addr.Mx = strings.TrimRight(addr.Mx, ".") + + target = hostPort(addr.Mx, d.port) case *dns.A: target = hostPort(addr.A.String(), d.port) case *dns.AAAA: @@ -222,6 +233,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ dnsNameLabel: model.LabelValue(name), dnsSrvRecordTargetLabel: dnsSrvRecordTarget, dnsSrvRecordPortLabel: dnsSrvRecordPort, + dnsMxRecordTargetLabel: dnsMxRecordTarget, }) } @@ -240,22 +252,22 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ // // There are three possible outcomes: // -// 1. One of the permutations of the given name is recognized as -// "valid" by the DNS, in which case we consider ourselves "done" -// and that answer is returned. Note that, due to the way the DNS -// handles "name has resource records, but none of the specified type", -// the answer received may have an empty set of results. +// 1. One of the permutations of the given name is recognized as +// "valid" by the DNS, in which case we consider ourselves "done" +// and that answer is returned. Note that, due to the way the DNS +// handles "name has resource records, but none of the specified type", +// the answer received may have an empty set of results. // -// 2. 
All of the permutations of the given name are responded to by one of -// the servers in the "nameservers" list with the answer "that name does -// not exist" (NXDOMAIN). In that case, it can be considered -// pseudo-authoritative that there are no records for that name. +// 2. All of the permutations of the given name are responded to by one of +// the servers in the "nameservers" list with the answer "that name does +// not exist" (NXDOMAIN). In that case, it can be considered +// pseudo-authoritative that there are no records for that name. // -// 3. One or more of the names was responded to by all servers with some -// sort of error indication. In that case, we can't know if, in fact, -// there are records for the name or not, so whatever state the -// configuration is in, we should keep it that way until we know for -// sure (by, presumably, all the names getting answers in the future). +// 3. One or more of the names was responded to by all servers with some +// sort of error indication. In that case, we can't know if, in fact, +// there are records for the name or not, so whatever state the +// configuration is in, we should keep it that way until we know for +// sure (by, presumably, all the names getting answers in the future). // // Outcomes 1 and 2 are indicated by a valid response message (possibly an // empty one) and no error. Outcome 3 is indicated by an error return. The @@ -301,11 +313,11 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms // // A "viable answer" is one which indicates either: // -// 1. "yes, I know that name, and here are its records of the requested type" -// (RCODE==SUCCESS, ANCOUNT > 0); -// 2. "yes, I know that name, but it has no records of the requested type" -// (RCODE==SUCCESS, ANCOUNT==0); or -// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). +// 1. "yes, I know that name, and here are its records of the requested type" +// (RCODE==SUCCESS, ANCOUNT > 0); +// 2. "yes, I know that name, but it has no records of the requested type" +// (RCODE==SUCCESS, ANCOUNT==0); or +// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). // // A non-viable answer is "anything else", which encompasses both various // system-level problems (like network timeouts) and also diff --git a/vendor/github.com/prometheus/prometheus/discovery/file/file.go b/vendor/github.com/prometheus/prometheus/discovery/file/file.go index 8fa1d974374..5aa7f2e5f43 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/file/file.go +++ b/vendor/github.com/prometheus/prometheus/discovery/file/file.go @@ -32,7 +32,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 27423d19c2f..48237bdc0ee 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -333,6 +333,11 @@ func (ls Labels) Map() map[string]string { return m } +// EmptyLabels returns n empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. 
func New(ls ...Label) Labels { @@ -467,17 +472,25 @@ func (b *Builder) Set(n, v string) *Builder { return b } -// Labels returns the labels from the builder. If no modifications -// were made, the original labels are returned. -func (b *Builder) Labels() Labels { +// Labels returns the labels from the builder, adding them to res if non-nil. +// Argument res can be the same as b.base, if caller wants to overwrite that slice. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels(res Labels) Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - // In the general case, labels are removed, modified or moved - // rather than added. - res := make(Labels, 0, len(b.base)) + if res == nil { + // In the general case, labels are removed, modified or moved + // rather than added. + res = make(Labels, 0, len(b.base)) + } else { + res = res[:0] + } Outer: + // Justification that res can be the same slice as base: in this loop + // we move forward through base, and either skip an element or assign + // it to res at its current position or an earlier position. for _, l := range b.base { for _, n := range b.del { if l.Name == n { diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go index 319ee6184ec..a683588d166 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go @@ -15,11 +15,10 @@ package labels import ( "bufio" + "fmt" "os" "sort" "strings" - - "github.com/pkg/errors" ) // Slice is a sortable slice of label sets. @@ -81,7 +80,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) { } if i != n { - return mets, errors.Errorf("requested %d metrics but found %d", n, i) + return mets, fmt.Errorf("requested %d metrics but found %d", n, i) } return mets, nil } diff --git a/vendor/github.com/googleapis/go-type-adapters/adapters/month.go b/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go similarity index 53% rename from vendor/github.com/googleapis/go-type-adapters/adapters/month.go rename to vendor/github.com/prometheus/prometheus/model/metadata/metadata.go index e37e57b8376..f227af0b951 100644 --- a/vendor/github.com/googleapis/go-type-adapters/adapters/month.go +++ b/vendor/github.com/prometheus/prometheus/model/metadata/metadata.go @@ -1,10 +1,9 @@ -// Copyright 2021 Google LLC -// +// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// https://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,20 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package adapters - -import ( - "time" +package metadata - mpb "google.golang.org/genproto/googleapis/type/month" -) - -// ToMonth converts a google.type.Month to a golang Month. -func ToMonth(m mpb.Month) time.Month { - return time.Month(m.Number()) -} +import "github.com/prometheus/prometheus/model/textparse" -// ToProtoMonth converts a golang Month to a google.type.Month. 
-func ToProtoMonth(m time.Month) mpb.Month { - return mpb.Month(m) +// Metadata stores a series' metadata information. +type Metadata struct { + Type textparse.MetricType + Unit string + Help string } diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 687ac03d083..e0d7f6ddf54 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -19,7 +19,6 @@ import ( "strings" "github.com/grafana/regexp" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -71,7 +70,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { *a = act return nil } - return errors.Errorf("unknown relabel action %q", s) + return fmt.Errorf("unknown relabel action %q", s) } // Config is the configuration for relabeling of target label sets. @@ -105,25 +104,25 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { c.Regex = MustNewRegexp("") } if c.Action == "" { - return errors.Errorf("relabel action cannot be empty") + return fmt.Errorf("relabel action cannot be empty") } if c.Modulus == 0 && c.Action == HashMod { - return errors.Errorf("relabel configuration for hashmod requires non-zero modulus") + return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") } if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" { - return errors.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) + return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) { - return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement { - return errors.Errorf("'replacement' can not be set for %s action", c.Action) + return fmt.Errorf("'replacement' can not be set for %s action", c.Action) } if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { - return errors.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) + return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) } if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() { - return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } if c.Action == LabelDrop || c.Action == LabelKeep { @@ -132,7 +131,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { c.Modulus != DefaultRelabelConfig.Modulus || c.Separator != DefaultRelabelConfig.Separator || c.Replacement != DefaultRelabelConfig.Replacement { - return errors.Errorf("%s action requires only 'regex', and no other fields", c.Action) + return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action) } } @@ -193,24 +192,29 @@ func (re Regexp) String() string { // are applied in order of input. // If a label set is dropped, nil is returned. // May return the input labelSet modified. 
-func Process(labels labels.Labels, cfgs ...*Config) labels.Labels { +func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { + lb := labels.NewBuilder(nil) for _, cfg := range cfgs { - labels = relabel(labels, cfg) - if labels == nil { + lbls = relabel(lbls, cfg, lb) + if lbls == nil { return nil } } - return labels + return lbls } -func relabel(lset labels.Labels, cfg *Config) labels.Labels { - values := make([]string, 0, len(cfg.SourceLabels)) +func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels { + var va [16]string + values := va[:0] + if len(cfg.SourceLabels) > cap(values) { + values = make([]string, 0, len(cfg.SourceLabels)) + } for _, ln := range cfg.SourceLabels { values = append(values, lset.Get(string(ln))) } val := strings.Join(values, cfg.Separator) - lb := labels.NewBuilder(lset) + lb.Reset(lset) switch cfg.Action { case Drop: @@ -265,10 +269,10 @@ func relabel(lset labels.Labels, cfg *Config) labels.Labels { } } default: - panic(errors.Errorf("relabel: unknown relabel action type %q", cfg.Action)) + panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action)) } - return lb.Labels() + return lb.Labels(lset) } // sum64 sums the md5 hash to an uint64. diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index 46e00800746..f1d5f39257e 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -16,14 +16,15 @@ package rulefmt import ( "bytes" "context" + "errors" + "fmt" "io" "os" "strings" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" @@ -40,12 +41,21 @@ type Error struct { // Error prints the error message in a formatted string. func (err *Error) Error() string { + if err.Err.err == nil { + return "" + } if err.Err.nodeAlt != nil { - return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error() - } else if err.Err.node != nil { - return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error() + return fmt.Sprintf("%d:%d: %d:%d: group %q, rule %d, %q: %v", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName, err.Err.err) + } + if err.Err.node != nil { + return fmt.Sprintf("%d:%d: group %q, rule %d, %q: %v", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName, err.Err.err) } - return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error() + return fmt.Sprintf("group %q, rule %d, %q: %v", err.Group, err.Rule, err.RuleName, err.Err.err) +} + +// Unwrap unpacks wrapped error for use in errors.Is & errors.As. +func (err *Error) Unwrap() error { + return &err.Err } // WrappedError wraps error with the yaml node which can be used to represent @@ -58,14 +68,23 @@ type WrappedError struct { // Error prints the error message in a formatted string. 
func (we *WrappedError) Error() string { + if we.err == nil { + return "" + } if we.nodeAlt != nil { - return errors.Wrapf(we.err, "%d:%d: %d:%d", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column).Error() - } else if we.node != nil { - return errors.Wrapf(we.err, "%d:%d", we.node.Line, we.node.Column).Error() + return fmt.Sprintf("%d:%d: %d:%d: %v", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column, we.err) + } + if we.node != nil { + return fmt.Sprintf("%d:%d: %v", we.node.Line, we.node.Column, we.err) } return we.err.Error() } +// Unwrap unpacks wrapped error for use in errors.Is & errors.As. +func (we *WrappedError) Unwrap() error { + return we.err +} + // RuleGroups is a set of rule groups that are typically exposed in a file. type RuleGroups struct { Groups []RuleGroup `yaml:"groups"` @@ -81,13 +100,13 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { for j, g := range g.Groups { if g.Name == "" { - errs = append(errs, errors.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column)) + errs = append(errs, fmt.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column)) } if _, ok := set[g.Name]; ok { errs = append( errs, - errors.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name), + fmt.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name), ) } @@ -146,7 +165,7 @@ type RuleNode struct { func (r *RuleNode) Validate() (nodes []WrappedError) { if r.Record.Value != "" && r.Alert.Value != "" { nodes = append(nodes, WrappedError{ - err: errors.Errorf("only one of 'record' and 'alert' must be set"), + err: fmt.Errorf("only one of 'record' and 'alert' must be set"), node: &r.Record, nodeAlt: &r.Alert, }) @@ -154,12 +173,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { if r.Record.Value == "" && r.Alert.Value == "" { if r.Record.Value == "0" { nodes = append(nodes, WrappedError{ - err: errors.Errorf("one of 'record' or 'alert' must be set"), + err: fmt.Errorf("one of 'record' or 'alert' must be set"), node: &r.Alert, }) } else { nodes = append(nodes, WrappedError{ - err: errors.Errorf("one of 'record' or 'alert' must be set"), + err: fmt.Errorf("one of 'record' or 'alert' must be set"), node: &r.Record, }) } @@ -167,31 +186,31 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { if r.Expr.Value == "" { nodes = append(nodes, WrappedError{ - err: errors.Errorf("field 'expr' must be set in rule"), + err: fmt.Errorf("field 'expr' must be set in rule"), node: &r.Expr, }) } else if _, err := parser.ParseExpr(r.Expr.Value); err != nil { nodes = append(nodes, WrappedError{ - err: errors.Wrapf(err, "could not parse expression"), + err: fmt.Errorf("could not parse expression: %w", err), node: &r.Expr, }) } if r.Record.Value != "" { if len(r.Annotations) > 0 { nodes = append(nodes, WrappedError{ - err: errors.Errorf("invalid field 'annotations' in recording rule"), + err: fmt.Errorf("invalid field 'annotations' in recording rule"), node: &r.Record, }) } if r.For != 0 { nodes = append(nodes, WrappedError{ - err: errors.Errorf("invalid field 'for' in recording rule"), + err: fmt.Errorf("invalid field 'for' in recording rule"), node: &r.Record, }) } if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { nodes = append(nodes, WrappedError{ - err: errors.Errorf("invalid recording rule name: %s", r.Record.Value), + err: fmt.Errorf("invalid recording rule 
name: %s", r.Record.Value), node: &r.Record, }) } @@ -200,13 +219,13 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { for k, v := range r.Labels { if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { nodes = append(nodes, WrappedError{ - err: errors.Errorf("invalid label name: %s", k), + err: fmt.Errorf("invalid label name: %s", k), }) } if !model.LabelValue(v).IsValid() { nodes = append(nodes, WrappedError{ - err: errors.Errorf("invalid label value: %s", v), + err: fmt.Errorf("invalid label value: %s", v), }) } } @@ -214,7 +233,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { for k := range r.Annotations { if !model.LabelName(k).IsValid() { nodes = append(nodes, WrappedError{ - err: errors.Errorf("invalid annotation name: %s", k), + err: fmt.Errorf("invalid annotation name: %s", k), }) } } @@ -260,7 +279,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { for k, val := range rl.Labels { err := parseTest(val) if err != nil { - errs = append(errs, errors.Wrapf(err, "label %q", k)) + errs = append(errs, fmt.Errorf("label %q: %w", k, err)) } } @@ -268,7 +287,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) { for k, val := range rl.Annotations { err := parseTest(val) if err != nil { - errs = append(errs, errors.Wrapf(err, "annotation %q", k)) + errs = append(errs, fmt.Errorf("annotation %q: %w", k, err)) } } @@ -287,7 +306,7 @@ func Parse(content []byte) (*RuleGroups, []error) { decoder.KnownFields(true) err := decoder.Decode(&groups) // Ignore io.EOF which happens with empty input. - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { errs = append(errs, err) } err = yaml.Unmarshal(content, &node) @@ -306,11 +325,11 @@ func Parse(content []byte) (*RuleGroups, []error) { func ParseFile(file string) (*RuleGroups, []error) { b, err := os.ReadFile(file) if err != nil { - return nil, []error{errors.Wrap(err, file)} + return nil, []error{fmt.Errorf("%s: %w", file, err)} } rgs, errs := Parse(b) for i := range errs { - errs[i] = errors.Wrap(errs[i], file) + errs[i] = fmt.Errorf("%s: %w", file, errs[i]) } return rgs, errs } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go index 6093c9f59d5..9f17cfd4368 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go @@ -58,8 +58,6 @@ yystate0: goto yystart61 } - goto yystate0 // silence unused label error - goto yystate1 // silence unused label error yystate1: c = l.next() yystart1: @@ -94,7 +92,6 @@ yystate4: goto yystate4 } - goto yystate5 // silence unused label error yystate5: c = l.next() yystart5: @@ -262,7 +259,6 @@ yystate24: c = l.next() goto yyrule4 - goto yystate25 // silence unused label error yystate25: c = l.next() yystart25: @@ -282,7 +278,6 @@ yystate26: goto yystate26 } - goto yystate27 // silence unused label error yystate27: c = l.next() yystart27: @@ -308,7 +303,6 @@ yystate29: c = l.next() goto yyrule7 - goto yystate30 // silence unused label error yystate30: c = l.next() yystart30: @@ -346,7 +340,6 @@ yystate34: c = l.next() goto yyrule11 - goto yystate35 // silence unused label error yystate35: c = l.next() yystart35: @@ -383,7 +376,6 @@ yystate38: goto yystate36 } - goto yystate39 // silence unused label error yystate39: c = l.next() yystart39: @@ -418,7 +410,6 @@ yystate42: c = l.next() goto yyrule9 - goto yystate43 // silence 
unused label error yystate43: c = l.next() yystart43: @@ -479,7 +470,6 @@ yystate49: c = l.next() goto yyrule18 - goto yystate50 // silence unused label error yystate50: c = l.next() yystart50: @@ -517,7 +507,6 @@ yystate54: c = l.next() goto yyrule20 - goto yystate55 // silence unused label error yystate55: c = l.next() yystart55: @@ -574,7 +563,6 @@ yystate60: goto yystate58 } - goto yystate61 // silence unused label error yystate61: c = l.next() yystart61: @@ -747,16 +735,58 @@ yyrule25: // {S}[^ \n]+ return tTimestamp } yyrule26: // \n - { + if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak goto yystate0 } panic("unreachable") - goto yyabort // silence unused label error - yyabort: // no lexem recognized + // + // silence unused label errors for build and satisfy go vet reachability analysis + // + { + if false { + goto yyabort + } + if false { + goto yystate0 + } + if false { + goto yystate1 + } + if false { + goto yystate5 + } + if false { + goto yystate25 + } + if false { + goto yystate27 + } + if false { + goto yystate30 + } + if false { + goto yystate35 + } + if false { + goto yystate39 + } + if false { + goto yystate43 + } + if false { + goto yystate50 + } + if false { + goto yystate55 + } + if false { + goto yystate61 + } + } return tInvalid } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 6c5fde78ca2..be1ffcd0930 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -18,6 +18,7 @@ package textparse import ( "bytes" + "errors" "fmt" "io" "math" @@ -25,8 +26,6 @@ import ( "strings" "unicode/utf8" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" @@ -276,7 +275,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case "unknown": p.mtype = MetricTypeUnknown default: - return EntryInvalid, errors.Errorf("invalid metric type %q", s) + return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } case tHelp: if !utf8.Valid(p.text) { @@ -293,7 +292,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { u := yoloString(p.text) if len(u) > 0 { if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { - return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m) + return EntryInvalid, fmt.Errorf("unit not a suffix of metric %q", m) } } return EntryUnit, nil @@ -353,7 +352,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { return EntrySeries, nil default: - err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) + err = fmt.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) } return EntryInvalid, err } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l index c3c5c3bb001..c1bc8e77661 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l @@ -27,6 +27,9 @@ const ( sLValue sValue sTimestamp + sExemplar + sEValue + sETimestamp ) // Lex is called by the parser generated by "go tool yacc" to obtain each diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go 
b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go index 690ec4e05bb..4076aae610e 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go @@ -1,4 +1,4 @@ -// CAUTION: Generated file - DO NOT EDIT. +// Code generated by golex. DO NOT EDIT. // Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,7 @@ package textparse import ( - "github.com/pkg/errors" + "fmt" ) const ( @@ -47,7 +47,7 @@ yystate0: switch yyt := l.state; yyt { default: - panic(errors.Errorf(`invalid start condition %d`, yyt)) + panic(fmt.Errorf(`invalid start condition %d`, yyt)) case 0: // start condition: INITIAL goto yystart1 case 1: // start condition: sComment @@ -66,8 +66,6 @@ yystate0: goto yystart36 } - goto yystate0 // silence unused label error - goto yystate1 // silence unused label error yystate1: c = l.next() yystart1: @@ -130,7 +128,6 @@ yystate7: goto yystate7 } - goto yystate8 // silence unused label error yystate8: c = l.next() yystart8: @@ -235,7 +232,6 @@ yystate18: goto yystate18 } - goto yystate19 // silence unused label error yystate19: c = l.next() yystart19: @@ -257,7 +253,6 @@ yystate20: goto yystate20 } - goto yystate21 // silence unused label error yystate21: c = l.next() yystart21: @@ -290,7 +285,6 @@ yystate23: goto yystate22 } - goto yystate24 // silence unused label error yystate24: c = l.next() yystart24: @@ -330,7 +324,6 @@ yystate28: c = l.next() goto yyrule13 - goto yystate29 // silence unused label error yystate29: c = l.next() yystart29: @@ -369,7 +362,6 @@ yystate32: goto yystate30 } - goto yystate33 // silence unused label error yystate33: c = l.next() yystart33: @@ -397,7 +389,6 @@ yystate35: c = l.next() goto yyrule11 - goto yystate36 // silence unused label error yystate36: c = l.next() yystart36: @@ -521,16 +512,50 @@ yyrule18: // {D}+ return tTimestamp } yyrule19: // \n - { + if true { // avoid go vet determining the below panic will not be reached l.state = sInit return tLinebreak goto yystate0 } panic("unreachable") - goto yyabort // silence unused label error - yyabort: // no lexem recognized + // + // silence unused label errors for build and satisfy go vet reachability analysis + // + { + if false { + goto yyabort + } + if false { + goto yystate0 + } + if false { + goto yystate1 + } + if false { + goto yystate8 + } + if false { + goto yystate19 + } + if false { + goto yystate21 + } + if false { + goto yystate24 + } + if false { + goto yystate29 + } + if false { + goto yystate33 + } + if false { + goto yystate36 + } + } + // Workaround to gobble up comments that started with a HELP or TYPE // prefix. We just consume all characters until we reach a newline. // This saves us from adding disproportionate complexity to the parser. 
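An illustrative aside, not part of the vendored changes: the generated lexers above replace the bare `goto ... // silence unused label error` statements with explicit `if true` / `if false` blocks. Below is a minimal, hypothetical Go sketch of that workaround, with made-up names (`next`, `abort`) and only the standard library assumed.

package main

import "fmt"

// next mimics the shape of the generated lexer rules above: the real return sits
// inside an `if true` block, and otherwise-unused labels are referenced from an
// `if false` block.
func next(c byte) string {
	if c == '\n' {
		if true { // keep go vet from treating the code below as provably unreachable
			return "linebreak"
		}
	}

	if false { // never runs; its only purpose is to mark the label below as used
		goto abort
	}

abort:
	return "invalid"
}

func main() {
	fmt.Println(next('\n'))
	fmt.Println(next('x'))
}

The `if false` blocks are never executed but still count as references, so the compiler accepts labels that would otherwise trigger "label defined and not used"; wrapping the real return in `if true` keeps `go vet` from concluding that the trailing code after it can never be reached.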
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 946cd2bc808..758ea5a66af 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -17,6 +17,7 @@ package textparse import ( + "errors" "fmt" "io" "math" @@ -26,8 +27,6 @@ import ( "unicode/utf8" "unsafe" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" @@ -252,7 +251,7 @@ func (p *PromParser) nextToken() token { } func parseError(exp string, got token) error { - return errors.Errorf("%s, got %q", exp, got) + return fmt.Errorf("%s, got %q", exp, got) } // Next advances the parser to the next sample. It returns false if no @@ -301,11 +300,11 @@ func (p *PromParser) Next() (Entry, error) { case "untyped": p.mtype = MetricTypeUnknown default: - return EntryInvalid, errors.Errorf("invalid metric type %q", s) + return EntryInvalid, fmt.Errorf("invalid metric type %q", s) } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, errors.Errorf("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string") } } if t := p.nextToken(); t != tLinebreak { @@ -364,7 +363,7 @@ func (p *PromParser) Next() (Entry, error) { return EntrySeries, nil default: - err = errors.Errorf("%q is not a valid start token", t) + err = fmt.Errorf("%q is not a valid start token", t) } return EntryInvalid, err } @@ -388,7 +387,7 @@ func (p *PromParser) parseLVals() error { return parseError("expected label value", t) } if !utf8.Valid(p.l.buf()) { - return errors.Errorf("invalid UTF-8 label value") + return fmt.Errorf("invalid UTF-8 label value") } // The promlexer ensures the value string is quoted. Strip first diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 78da1129ac8..99886bd4bc3 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -302,12 +302,22 @@ func (n *Manager) nextBatch() []*Alert { // Run dispatches notifications continuously. func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { for { + // The select is split in two parts, such as we will first try to read + // new alertmanager targets if they are available, before sending new + // alerts. select { case <-n.ctx.Done(): return case ts := <-tsets: n.reload(ts) - case <-n.more: + default: + select { + case <-n.ctx.Done(): + return + case ts := <-tsets: + n.reload(ts) + case <-n.more: + } } alerts := n.nextBatch() @@ -351,7 +361,7 @@ func (n *Manager) Send(alerts ...*Alert) { } } - a.Labels = lb.Labels() + a.Labels = lb.Labels(a.Labels) } alerts = n.relabelAlerts(alerts) diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 3cb1c95683c..b6842e8f47d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -128,6 +128,8 @@ type Query interface { type QueryOpts struct { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. EnablePerStepStats bool + // Lookback delta duration for this query. 
+ LookbackDelta time.Duration } // query implements the Query interface. @@ -437,11 +439,17 @@ func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Exp opts = &QueryOpts{} } + lookbackDelta := opts.LookbackDelta + if lookbackDelta <= 0 { + lookbackDelta = ng.lookbackDelta + } + es := &parser.EvalStmt{ - Expr: PreprocessExpr(expr, start, end), - Start: start, - End: end, - Interval: interval, + Expr: PreprocessExpr(expr, start, end), + Start: start, + End: end, + Interval: interval, + LookbackDelta: lookbackDelta, } qry := &query{ stmt: es, @@ -636,7 +644,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, - lookbackDelta: ng.lookbackDelta, + lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, } @@ -688,7 +696,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, - lookbackDelta: ng.lookbackDelta, + lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, } @@ -801,7 +809,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS } if evalRange == 0 { - start = start - durationMilliseconds(ng.lookbackDelta) + start = start - durationMilliseconds(s.LookbackDelta) } else { // For all matrix queries we want to ensure that we have (end-start) + range selected // this way we have `range` data before the start time @@ -937,7 +945,7 @@ func (ev *evaluator) error(err error) { } // recover is the handler that turns panics into returns from the top level of evaluation. -func (ev *evaluator) recover(ws *storage.Warnings, errp *error) { +func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) { e := recover() if e == nil { return @@ -949,7 +957,7 @@ func (ev *evaluator) recover(ws *storage.Warnings, errp *error) { buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf)) + level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -960,7 +968,7 @@ func (ev *evaluator) recover(ws *storage.Warnings, errp *error) { } func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { - defer ev.recover(&ws, &err) + defer ev.recover(expr, &ws, &err) v, ws = ev.eval(expr) return v, ws, nil @@ -2108,7 +2116,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels() + ret := enh.lb.Labels(nil) enh.resultMetric[str] = ret return ret } @@ -2148,7 +2156,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels() + return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil) } // scalarBinop evaluates a binary operation between two Scalars. 
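An illustrative aside, not part of the vendored changes: many hunks in this file and below switch from the old zero-argument `Builder.Labels()` to the new `Labels(res)` form introduced in model/labels. The following small, hypothetical sketch shows the two call styles; it assumes the vendored `github.com/prometheus/prometheus/model/labels` package, and the metric and label names are made up.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	base := labels.FromStrings("__name__", "http_requests_total", "instance", "a:9090", "job", "api")

	b := labels.NewBuilder(base)
	b.Del(labels.MetricName)
	b.Set("env", "prod")

	// nil: the builder allocates a fresh result slice (the pre-change behaviour).
	fresh := b.Labels(nil)

	// Non-nil: the result is written into the supplied slice, which the builder's
	// own doc comment notes may even be its base once the caller no longer needs
	// the old value.
	reused := b.Labels(base)

	fmt.Println(fresh)
	fmt.Println(reused)
}

Passing nil preserves the old allocate-per-call behaviour, while passing an existing slice lets hot paths such as `relabel.Process` and `notifier.Manager.Send` reuse memory instead of allocating a new label set on every call.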
@@ -2272,7 +2280,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without if op == parser.COUNT_VALUES { lb.Reset(metric) lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = lb.Labels() + metric = lb.Labels(nil) // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2296,7 +2304,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } else { lb.Keep(grouping...) } - m := lb.Labels() + m := lb.Labels(nil) newAgg := &groupedAggregation{ labels: m, value: s.V, diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index d133fef051d..e25f3f119e1 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -31,17 +31,23 @@ import ( // FunctionCall is the type of a PromQL function implementation // // vals is a list of the evaluated arguments for the function call. -// For range vectors it will be a Matrix with one series, instant vectors a -// Vector, scalars a Vector with one series whose value is the scalar -// value,and nil for strings. +// +// For range vectors it will be a Matrix with one series, instant vectors a +// Vector, scalars a Vector with one series whose value is the scalar +// value,and nil for strings. +// // args are the original arguments to the function, where you can access -// matrixSelectors, vectorSelectors, and StringLiterals. +// matrixSelectors, vectorSelectors, and StringLiterals. +// // enh.Out is a pre-allocated empty vector that you may use to accumulate -// output before returning it. The vectors in vals should not be returned.a +// output before returning it. The vectors in vals should not be returned.a +// // Range vector functions need only return a vector with the right value, -// the metric and timestamp are not needed. +// the metric and timestamp are not needed. +// // Instant vector functions need only return a vector with the right values and -// metrics, the timestamp are not needed. +// metrics, the timestamp are not needed. +// // Scalar results should be returned as the value of a sample in a Vector. type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector @@ -813,7 +819,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if !ok { el.Metric = labels.NewBuilder(el.Metric). Del(excludedLabels...). 
- Labels() + Labels(nil) mb = &metricWithBuckets{el.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb @@ -912,7 +918,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels() + outMetric = lb.Labels(nil) enh.Dmn[h] = outMetric } } @@ -980,7 +986,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels() + outMetric = lb.Labels(nil) enh.Dmn[h] = outMetric } @@ -1233,14 +1239,14 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { continue } if ma.Type == labels.MatchEqual && !m.Has(ma.Name) { - m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels() + m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil) } else { empty = append(empty, ma.Name) } } for _, v := range empty { - m = labels.NewBuilder(m).Del(v).Labels() + m = labels.NewBuilder(m).Del(v).Labels(nil) } return m } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index d5a2335abb5..190af2d5905 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -28,18 +28,22 @@ import ( // or a chain of function definitions (e.g. String(), PromQLExpr(), etc.) convention is // to list them as follows: // -// - Statements -// - statement types (alphabetical) -// - ... -// - Expressions -// - expression types (alphabetical) -// - ... -// +// - Statements +// - statement types (alphabetical) +// - ... +// - Expressions +// - expression types (alphabetical) +// - ... type Node interface { // String representation of the node that returns the given node when parsed // as part of a valid query. String() string + // Pretty returns the prettified representation of the node. + // It uses the level information to determine at which level/depth the current + // node is in the AST and uses this to apply indentation. + Pretty(level int) string + // PositionRange returns the position of the AST Node in the query string. PositionRange() PositionRange } @@ -63,6 +67,8 @@ type EvalStmt struct { Start, End time.Time // Time between two evaluated instants for the range [Start:End]. Interval time.Duration + // Lookback delta to use for this evaluation. + LookbackDelta time.Duration } func (*EvalStmt) PromQLStmt() {} @@ -205,8 +211,9 @@ type VectorSelector struct { // of an arbitrary function during handling. It is used to test the Engine. type TestStmt func(context.Context) error -func (TestStmt) String() string { return "test statement" } -func (TestStmt) PromQLStmt() {} +func (TestStmt) String() string { return "test statement" } +func (TestStmt) PromQLStmt() {} +func (t TestStmt) Pretty(int) string { return t.String() } func (TestStmt) PositionRange() PositionRange { return PositionRange{ diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go index e1dee335645..7b6a6b027bc 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go @@ -48,6 +48,10 @@ func (i Item) String() string { return fmt.Sprintf("%q", i.Val) } +// Pretty returns the prettified form of an item. +// This is same as the item's stringified format. 
+func (i Item) Pretty(int) string { return i.String() } + // IsOperator returns true if the Item corresponds to a arithmetic or set operator. // Returns false otherwise. func (i ItemType) IsOperator() bool { return i > operatorsStart && i < operatorsEnd } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go new file mode 100644 index 00000000000..9870d6da748 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier.go @@ -0,0 +1,166 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "strings" +) + +// Approach +// -------- +// When a PromQL query is parsed, it is converted into PromQL AST, +// which is a nested structure of nodes. Each node has a depth/level +// (distance from the root), that is passed by its parent. +// +// While prettifying, a Node considers 2 things: +// 1. Did the current Node's parent add a new line? +// 2. Does the current Node needs to be prettified? +// +// The level of a Node determines if it should be indented or not. +// The answer to the 1 is NO if the level passed is 0. This means, the +// parent Node did not apply a new line, so the current Node must not +// apply any indentation as prefix. +// If level > 1, a new line is applied by the parent. So, the current Node +// should prefix an indentation before writing any of its content. This indentation +// will be ([level/depth of current Node] * " "). +// +// The answer to 2 is YES if the normalized length of the current Node exceeds +// the maxCharactersPerLine limit. Hence, it applies the indentation equal to +// its depth and increments the level by 1 before passing down the child. +// If the answer is NO, the current Node returns the normalized string value of itself. 
+ +var maxCharactersPerLine = 100 + +func Prettify(n Node) string { + return n.Pretty(0) +} + +func (e *AggregateExpr) Pretty(level int) string { + s := indent(level) + if !needsSplit(e) { + s += e.String() + return s + } + + s += e.getAggOpStr() + s += "(\n" + + if e.Op.IsAggregatorWithParam() { + s += fmt.Sprintf("%s,\n", e.Param.Pretty(level+1)) + } + s += fmt.Sprintf("%s\n%s)", e.Expr.Pretty(level+1), indent(level)) + return s +} + +func (e *BinaryExpr) Pretty(level int) string { + s := indent(level) + if !needsSplit(e) { + s += e.String() + return s + } + returnBool := "" + if e.ReturnBool { + returnBool = " bool" + } + + matching := e.getMatchingStr() + return fmt.Sprintf("%s\n%s%s%s%s\n%s", e.LHS.Pretty(level+1), indent(level), e.Op, returnBool, matching, e.RHS.Pretty(level+1)) +} + +func (e *Call) Pretty(level int) string { + s := indent(level) + if !needsSplit(e) { + s += e.String() + return s + } + s += fmt.Sprintf("%s(\n%s\n%s)", e.Func.Name, e.Args.Pretty(level+1), indent(level)) + return s +} + +func (e *EvalStmt) Pretty(_ int) string { + return "EVAL " + e.Expr.String() +} + +func (e Expressions) Pretty(level int) string { + // Do not prefix the indent since respective nodes will indent itself. + s := "" + for i := range e { + s += fmt.Sprintf("%s,\n", e[i].Pretty(level)) + } + return s[:len(s)-2] +} + +func (e *ParenExpr) Pretty(level int) string { + s := indent(level) + if !needsSplit(e) { + s += e.String() + return s + } + return fmt.Sprintf("%s(\n%s\n%s)", s, e.Expr.Pretty(level+1), indent(level)) +} + +func (e *StepInvariantExpr) Pretty(level int) string { + return e.Expr.Pretty(level) +} + +func (e *MatrixSelector) Pretty(level int) string { + return getCommonPrefixIndent(level, e) +} + +func (e *SubqueryExpr) Pretty(level int) string { + if !needsSplit(e) { + return e.String() + } + return fmt.Sprintf("%s%s", e.Expr.Pretty(level), e.getSubqueryTimeSuffix()) +} + +func (e *VectorSelector) Pretty(level int) string { + return getCommonPrefixIndent(level, e) +} + +func (e *NumberLiteral) Pretty(level int) string { + return getCommonPrefixIndent(level, e) +} + +func (e *StringLiteral) Pretty(level int) string { + return getCommonPrefixIndent(level, e) +} + +func (e *UnaryExpr) Pretty(level int) string { + child := e.Expr.Pretty(level) + // Remove the indent prefix from child since we attach the prefix indent before Op. + child = strings.TrimSpace(child) + return fmt.Sprintf("%s%s%s", indent(level), e.Op, child) +} + +func getCommonPrefixIndent(level int, current Node) string { + return fmt.Sprintf("%s%s", indent(level), current.String()) +} + +// needsSplit normalizes the node and then checks if the node needs any split. +// This is necessary to remove any trailing whitespaces. +func needsSplit(n Node) bool { + if n == nil { + return false + } + return len(n.String()) > maxCharactersPerLine +} + +const indentString = " " + +// indent adds the indentString n number of times. +func indent(n int) string { + return strings.Repeat(indentString, n) +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/prettier_rules.md b/vendor/github.com/prometheus/prometheus/promql/parser/prettier_rules.md new file mode 100644 index 00000000000..46c5e51ef9b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/parser/prettier_rules.md @@ -0,0 +1,16 @@ +# Prettifying PromQL expressions +This files contains rules for prettifying PromQL expressions. + +Note: The current version of prettier does not preserve comments. 
+ +### Keywords +`max_characters_per_line`: Maximum number of characters that will be allowed on a single line in a prettified PromQL expression. + +### Rules +1. A node exceeding the `max_characters_per_line` will qualify for split unless + 1. It is a `MatrixSelector` + 2. It is a `VectorSelector`. Label sets in a `VectorSelector` will be in the same line as metric_name, separated by commas and a space + Note: Label groupings like `by`, `without`, `on`, `ignoring` will remain on the same line as their parent node +2. Nodes that are nested within another node will be prettified only if they exceed the `max_characters_per_line` +3. Expressions like `sum(expression) without (label_matchers)` will be modified to `sum without(label_matchers) (expression)` +4. Functional call args will be split to different lines if they exceed the `max_characters_per_line` diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go index b21444cbf8d..1f15eeef33e 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go @@ -62,16 +62,7 @@ func (es Expressions) String() (s string) { } func (node *AggregateExpr) String() string { - aggrString := node.Op.String() - - if node.Without { - aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", ")) - } else { - if len(node.Grouping) > 0 { - aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", ")) - } - } - + aggrString := node.getAggOpStr() aggrString += "(" if node.Op.IsAggregatorWithParam() { aggrString += fmt.Sprintf("%s, ", node.Param) @@ -81,31 +72,48 @@ func (node *AggregateExpr) String() string { return aggrString } +func (node *AggregateExpr) getAggOpStr() string { + aggrString := node.Op.String() + + switch { + case node.Without: + aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", ")) + case len(node.Grouping) > 0: + aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", ")) + } + + return aggrString +} + func (node *BinaryExpr) String() string { returnBool := "" if node.ReturnBool { returnBool = " bool" } + matching := node.getMatchingStr() + return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) +} + +func (node *BinaryExpr) getMatchingStr() string { matching := "" vm := node.VectorMatching if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) { + vmTag := "ignoring" if vm.On { - matching = fmt.Sprintf(" on(%s)", strings.Join(vm.MatchingLabels, ", ")) - } else { - matching = fmt.Sprintf(" ignoring(%s)", strings.Join(vm.MatchingLabels, ", ")) + vmTag = "on" } + matching = fmt.Sprintf(" %s (%s)", vmTag, strings.Join(vm.MatchingLabels, ", ")) + if vm.Card == CardManyToOne || vm.Card == CardOneToMany { - matching += " group_" + vmCard := "right" if vm.Card == CardManyToOne { - matching += "left" - } else { - matching += "right" + vmCard = "left" } - matching += fmt.Sprintf("(%s)", strings.Join(vm.Include, ", ")) + matching += fmt.Sprintf(" group_%s (%s)", vmCard, strings.Join(vm.Include, ", ")) } } - return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) + return matching } func (node *Call) String() string { @@ -144,6 +152,11 @@ func (node *MatrixSelector) String() string { } func (node *SubqueryExpr) String() string { + return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix()) +} + +// getSubqueryTimeSuffix returns the '[:] @ offset 
' suffix of the subquery. +func (node *SubqueryExpr) getSubqueryTimeSuffix() string { step := "" if node.Step != 0 { step = model.Duration(node.Step).String() @@ -162,7 +175,7 @@ func (node *SubqueryExpr) String() string { } else if node.StartOrEnd == END { at = " @ end()" } - return fmt.Sprintf("%s[%s:%s]%s%s", node.Expr.String(), model.Duration(node.Range), step, at, offset) + return fmt.Sprintf("[%s:%s]%s%s", model.Duration(node.Range), step, at, offset) } func (node *NumberLiteral) String() string { diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go index 2c324fb90b6..716e7749b9d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -64,6 +64,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { level.Error(logger).Log("msg", "Failed to open query log file", "err", err) return } + defer fd.Close() brokenJSON := make([]byte, filesize) _, err = fd.Read(brokenJSON) diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 22144fe3587..41526ccdc5f 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -785,7 +785,7 @@ func (ll *LazyLoader) QueryEngine() *Engine { // Queryable allows querying the LazyLoader's data. // Note: only the samples till the max timestamp used -// in `WithSamplesTill` can be queried. +// in `WithSamplesTill` can be queried. func (ll *LazyLoader) Queryable() storage.Queryable { return ll.storage } diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index 5b0bd8fa823..c3d3bffdb97 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -138,15 +138,22 @@ func (vec Vector) String() string { // Such a behavior is semantically undefined // https://github.com/prometheus/prometheus/issues/4562 func (vec Vector) ContainsSameLabelset() bool { - l := make(map[uint64]struct{}, len(vec)) - for _, s := range vec { - hash := s.Metric.Hash() - if _, ok := l[hash]; ok { - return true + switch len(vec) { + case 0, 1: + return false + case 2: + return vec[0].Metric.Hash() == vec[1].Metric.Hash() + default: + l := make(map[uint64]struct{}, len(vec)) + for _, ss := range vec { + hash := ss.Metric.Hash() + if _, ok := l[hash]; ok { + return true + } + l[hash] = struct{}{} } - l[hash] = struct{}{} + return false } - return false } // Matrix is a slice of Series that implements sort.Interface and @@ -181,15 +188,22 @@ func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } // Such a behavior is semantically undefined. 
// https://github.com/prometheus/prometheus/issues/4562 func (m Matrix) ContainsSameLabelset() bool { - l := make(map[uint64]struct{}, len(m)) - for _, ss := range m { - hash := ss.Metric.Hash() - if _, ok := l[hash]; ok { - return true + switch len(m) { + case 0, 1: + return false + case 2: + return m[0].Metric.Hash() == m[1].Metric.Hash() + default: + l := make(map[uint64]struct{}, len(m)) + for _, ss := range m { + hash := ss.Metric.Hash() + if _, ok := l[hash]; ok { + return true + } + l[hash] = struct{}{} } - l[hash] = struct{}{} + return false } - return false } // Result holds the resulting value of an execution or an error diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index 7fd5c68d78b..4cfb1fa85f7 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -23,9 +23,9 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/common/model" - yaml "gopkg.in/yaml.v2" + "go.uber.org/atomic" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" @@ -69,7 +69,7 @@ func (s AlertState) String() string { case StateFiring: return "firing" } - panic(errors.Errorf("unknown alert state: %d", s)) + panic(fmt.Errorf("unknown alert state: %d", s)) } // Alert is the user-level representation of a single instance of an alerting rule. @@ -122,17 +122,17 @@ type AlertingRule struct { externalURL string // true if old state has been restored. We start persisting samples for ALERT_FOR_STATE // only after the restoration. - restored bool - // Protects the below. - mtx sync.Mutex + restored *atomic.Bool // Time in seconds taken to evaluate rule. - evaluationDuration time.Duration + evaluationDuration *atomic.Duration // Timestamp of last evaluation of rule. - evaluationTimestamp time.Time + evaluationTimestamp *atomic.Time // The health of the alerting rule. - health RuleHealth + health *atomic.String // The last error seen by the alerting rule. - lastError error + lastError *atomic.Error + // activeMtx Protects the `active` map. + activeMtx sync.Mutex // A map of alerts which are currently active (Pending or Firing), keyed by // the fingerprint of the labelset they correspond to. active map[uint64]*Alert @@ -152,17 +152,20 @@ func NewAlertingRule( } return &AlertingRule{ - name: name, - vector: vec, - holdDuration: hold, - labels: labels, - annotations: annotations, - externalLabels: el, - externalURL: externalURL, - health: HealthUnknown, - active: map[uint64]*Alert{}, - logger: logger, - restored: restored, + name: name, + vector: vec, + holdDuration: hold, + labels: labels, + annotations: annotations, + externalLabels: el, + externalURL: externalURL, + active: map[uint64]*Alert{}, + logger: logger, + restored: atomic.NewBool(restored), + health: atomic.NewString(string(HealthUnknown)), + evaluationTimestamp: atomic.NewTime(time.Time{}), + evaluationDuration: atomic.NewDuration(0), + lastError: atomic.NewError(nil), } } @@ -173,30 +176,22 @@ func (r *AlertingRule) Name() string { // SetLastError sets the current error seen by the alerting rule. func (r *AlertingRule) SetLastError(err error) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.lastError = err + r.lastError.Store(err) } // LastError returns the last error seen by the alerting rule. 
func (r *AlertingRule) LastError() error { - r.mtx.Lock() - defer r.mtx.Unlock() - return r.lastError + return r.lastError.Load() } // SetHealth sets the current health of the alerting rule. func (r *AlertingRule) SetHealth(health RuleHealth) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.health = health + r.health.Store(string(health)) } // Health returns the current health of the alerting rule. func (r *AlertingRule) Health() RuleHealth { - r.mtx.Lock() - defer r.mtx.Unlock() - return r.health + return RuleHealth(r.health.String()) } // Query returns the query expression of the alerting rule. @@ -231,7 +226,7 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample { lb.Set(alertStateLabel, alert.State.String()) s := promql.Sample{ - Metric: lb.Labels(), + Metric: lb.Labels(nil), Point: promql.Point{T: timestamp.FromTime(ts), V: 1}, } return s @@ -249,7 +244,7 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro lb.Set(labels.AlertName, r.name) s := promql.Sample{ - Metric: lb.Labels(), + Metric: lb.Labels(nil), Point: promql.Point{T: timestamp.FromTime(ts), V: v}, } return s @@ -284,42 +279,32 @@ func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (sto // SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation. func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.evaluationDuration = dur + r.evaluationDuration.Store(dur) } // GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule. func (r *AlertingRule) GetEvaluationDuration() time.Duration { - r.mtx.Lock() - defer r.mtx.Unlock() - return r.evaluationDuration + return r.evaluationDuration.Load() } // SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated. func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.evaluationTimestamp = ts + r.evaluationTimestamp.Store(ts) } // GetEvaluationTimestamp returns the time the evaluation took place. func (r *AlertingRule) GetEvaluationTimestamp() time.Time { - r.mtx.Lock() - defer r.mtx.Unlock() - return r.evaluationTimestamp + return r.evaluationTimestamp.Load() } // SetRestored updates the restoration state of the alerting rule. func (r *AlertingRule) SetRestored(restored bool) { - r.restored = restored + r.restored.Store(restored) } // Restored returns the restoration state of the alerting rule. func (r *AlertingRule) Restored() bool { - r.mtx.Lock() - defer r.mtx.Unlock() - return r.restored + return r.restored.Load() } // resolvedRetention is the duration for which a resolved alert instance @@ -334,9 +319,6 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, return nil, err } - r.mtx.Lock() - defer r.mtx.Unlock() - // Create pending alerts for any new vector elements in the alert expression // or update the expression value for existing elements. 
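The alerting.go hunks above swap the single rule-wide mutex for per-field wrappers from go.uber.org/atomic, keeping a mutex only around the active map. A minimal, self-contained sketch of that pattern; the struct and field names here are illustrative, not the vendored types:

package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// ruleState keeps each independently updated scalar in its own atomic wrapper,
// so readers (for example, status pages) never contend on a shared mutex.
type ruleState struct {
	health              *atomic.String
	lastError           *atomic.Error
	evaluationDuration  *atomic.Duration
	evaluationTimestamp *atomic.Time
}

func newRuleState() *ruleState {
	return &ruleState{
		health:              atomic.NewString("unknown"),
		lastError:           atomic.NewError(nil),
		evaluationDuration:  atomic.NewDuration(0),
		evaluationTimestamp: atomic.NewTime(time.Time{}),
	}
}

func main() {
	s := newRuleState()
	s.health.Store("ok")
	s.evaluationDuration.Store(150 * time.Millisecond)
	fmt.Println(s.health.Load(), s.evaluationDuration.Load(), s.lastError.Load())
}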
resultFPs := map[uint64]struct{}{} @@ -391,7 +373,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)}) } - lbs := lb.Labels() + lbs := lb.Labels(nil) h := lbs.Hash() resultFPs[h] = struct{}{} @@ -408,6 +390,9 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, } } + r.activeMtx.Lock() + defer r.activeMtx.Unlock() + for h, a := range alerts { // Check whether we already have alerting state for the identifying label set. // Update the last value and annotations if so, create a new alert entry otherwise. @@ -442,7 +427,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, a.FiredAt = ts } - if r.restored { + if r.restored.Load() { vec = append(vec, r.sample(a, ts)) vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix()))) } @@ -450,7 +435,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, if limit > 0 && numActivePending > limit { r.active = map[uint64]*Alert{} - return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending) + return nil, fmt.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending) } return vec, nil @@ -459,8 +444,8 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, // State returns the maximum state of alert instances for this rule. // StateFiring > StatePending > StateInactive func (r *AlertingRule) State() AlertState { - r.mtx.Lock() - defer r.mtx.Unlock() + r.activeMtx.Lock() + defer r.activeMtx.Unlock() maxState := StateInactive for _, a := range r.active { @@ -485,8 +470,8 @@ func (r *AlertingRule) ActiveAlerts() []*Alert { // currentAlerts returns all instances of alerts for this rule. This may include // inactive alerts that were previously firing. func (r *AlertingRule) currentAlerts() []*Alert { - r.mtx.Lock() - defer r.mtx.Unlock() + r.activeMtx.Lock() + defer r.activeMtx.Unlock() alerts := make([]*Alert, 0, len(r.active)) @@ -502,8 +487,8 @@ func (r *AlertingRule) currentAlerts() []*Alert { // and not on its copy. // If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'. func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { - r.mtx.Lock() - defer r.mtx.Unlock() + r.activeMtx.Lock() + defer r.activeMtx.Unlock() for _, a := range r.active { f(a) @@ -522,6 +507,8 @@ func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay } alert.ValidUntil = ts.Add(4 * delta) anew := *alert + // The notifier re-uses the labels slice, hence make a copy. 
+ anew.Labels = alert.Labels.Copy() alerts = append(alerts, &anew) } }) diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 4fdc1357962..1ed46959640 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -15,6 +15,8 @@ package rules import ( "context" + "errors" + "fmt" "math" "net/url" "sort" @@ -23,7 +25,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.opentelemetry.io/otel" @@ -34,9 +35,11 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/strutil" ) // RuleHealth describes the health state of a rule. @@ -630,7 +633,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Canceled queries are intentional termination of queries. This normally // happens on shutdown and thus we skip logging of any errors here. - if _, ok := err.(promql.ErrQueryCanceled); !ok { + var eqc promql.ErrQueryCanceled + if !errors.As(err, &eqc) { level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Evaluating rule failed", "rule", rule, "err", err) } return @@ -667,12 +671,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { rule.SetHealth(HealthBad) rule.SetLastError(err) sp.SetStatus(codes.Error, err.Error()) - - switch errors.Cause(err) { - case storage.ErrOutOfOrderSample: + unwrappedErr := errors.Unwrap(err) + switch { + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s) - case storage.ErrDuplicateSampleForTimestamp: + case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): numDuplicates++ level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s) default: @@ -694,9 +698,10 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if _, ok := seriesReturned[metric]; !ok { // Series no longer exposed, mark it stale. _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) - switch errors.Cause(err) { - case nil: - case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + unwrappedErr := errors.Unwrap(err) + switch { + case unwrappedErr == nil: + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: @@ -720,9 +725,10 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. 
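The rules/manager.go changes in this area replace pkg/errors' errors.Cause with the standard library's errors.Is and errors.Unwrap, which relies on sentinel errors being wrapped with %w. A small sketch of why the switch matters, using an illustrative sentinel rather than the real storage errors:

package main

import (
	"errors"
	"fmt"
)

// errOutOfOrder stands in for a sentinel such as storage.ErrOutOfOrderSample.
var errOutOfOrder = errors.New("out of order sample")

func appendSample() error {
	// Callers wrap the sentinel with %w, building a stdlib unwrap chain.
	return fmt.Errorf("series {__name__=\"up\"}: %w", errOutOfOrder)
}

func main() {
	err := appendSample()
	// pkg/errors' Cause() would return the wrapper itself here, because
	// %w-wrapped errors implement Unwrap(), not Cause(); errors.Is walks the
	// Unwrap chain and still finds the sentinel.
	fmt.Println(errors.Is(err, errOutOfOrder))       // true
	fmt.Println(errors.Unwrap(err) == errOutOfOrder) // true, single level of wrapping
}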
_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) - switch errors.Cause(err) { - case nil: - case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + unwrappedErr := errors.Unwrap(err) + switch { + case unwrappedErr == nil: + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: @@ -1074,7 +1080,7 @@ func (m *Manager) LoadGroups( for _, r := range rg.Rules { expr, err := m.opts.GroupLoader.Parse(r.Expr.Value) if err != nil { - return nil, []error{errors.Wrap(err, fn)} + return nil, []error{fmt.Errorf("%s: %w", fn, err)} } if r.Alert.Value != "" { @@ -1164,3 +1170,33 @@ func (m *Manager) AlertingRules() []*AlertingRule { return alerts } + +type Sender interface { + Send(alerts ...*notifier.Alert) +} + +// SendAlerts implements the rules.NotifyFunc for a Notifier. +func SendAlerts(s Sender, externalURL string) NotifyFunc { + return func(ctx context.Context, expr string, alerts ...*Alert) { + var res []*notifier.Alert + + for _, alert := range alerts { + a := ¬ifier.Alert{ + StartsAt: alert.FiredAt, + Labels: alert.Labels, + Annotations: alert.Annotations, + GeneratorURL: externalURL + strutil.TableLinkForExpression(expr), + } + if !alert.ResolvedAt.IsZero() { + a.EndsAt = alert.ResolvedAt + } else { + a.EndsAt = alert.ValidUntil + } + res = append(res, a) + } + + if len(alerts) > 0 { + s.Send(res...) + } + } +} diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go index 2ce15979f78..35d49603294 100644 --- a/vendor/github.com/prometheus/prometheus/rules/recording.go +++ b/vendor/github.com/prometheus/prometheus/rules/recording.go @@ -17,10 +17,10 @@ import ( "context" "fmt" "net/url" - "sync" "time" - yaml "gopkg.in/yaml.v2" + "go.uber.org/atomic" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" @@ -33,25 +33,26 @@ type RecordingRule struct { name string vector parser.Expr labels labels.Labels - // Protects the below. - mtx sync.Mutex // The health of the recording rule. - health RuleHealth + health *atomic.String // Timestamp of last evaluation of the recording rule. - evaluationTimestamp time.Time + evaluationTimestamp *atomic.Time // The last error seen by the recording rule. - lastError error + lastError *atomic.Error // Duration of how long it took to evaluate the recording rule. - evaluationDuration time.Duration + evaluationDuration *atomic.Duration } // NewRecordingRule returns a new recording rule. 
func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule { return &RecordingRule{ - name: name, - vector: vector, - health: HealthUnknown, - labels: lset, + name: name, + vector: vector, + labels: lset, + health: atomic.NewString(string(HealthUnknown)), + evaluationTimestamp: atomic.NewTime(time.Time{}), + evaluationDuration: atomic.NewDuration(0), + lastError: atomic.NewError(nil), } } @@ -88,7 +89,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFu lb.Set(l.Name, l.Value) } - sample.Metric = lb.Labels() + sample.Metric = lb.Labels(nil) } // Check that the rule does not produce identical metrics after applying @@ -124,56 +125,40 @@ func (rule *RecordingRule) String() string { // SetEvaluationDuration updates evaluationDuration to the time in seconds it took to evaluate the rule on its last evaluation. func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) { - rule.mtx.Lock() - defer rule.mtx.Unlock() - rule.evaluationDuration = dur + rule.evaluationDuration.Store(dur) } // SetLastError sets the current error seen by the recording rule. func (rule *RecordingRule) SetLastError(err error) { - rule.mtx.Lock() - defer rule.mtx.Unlock() - rule.lastError = err + rule.lastError.Store(err) } // LastError returns the last error seen by the recording rule. func (rule *RecordingRule) LastError() error { - rule.mtx.Lock() - defer rule.mtx.Unlock() - return rule.lastError + return rule.lastError.Load() } // SetHealth sets the current health of the recording rule. func (rule *RecordingRule) SetHealth(health RuleHealth) { - rule.mtx.Lock() - defer rule.mtx.Unlock() - rule.health = health + rule.health.Store(string(health)) } // Health returns the current health of the recording rule. func (rule *RecordingRule) Health() RuleHealth { - rule.mtx.Lock() - defer rule.mtx.Unlock() - return rule.health + return RuleHealth(rule.health.Load()) } // GetEvaluationDuration returns the time in seconds it took to evaluate the recording rule. func (rule *RecordingRule) GetEvaluationDuration() time.Duration { - rule.mtx.Lock() - defer rule.mtx.Unlock() - return rule.evaluationDuration + return rule.evaluationDuration.Load() } // SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated. func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) { - rule.mtx.Lock() - defer rule.mtx.Unlock() - rule.evaluationTimestamp = ts + rule.evaluationTimestamp.Store(ts) } // GetEvaluationTimestamp returns the time the evaluation took place. func (rule *RecordingRule) GetEvaluationTimestamp() time.Time { - rule.mtx.Lock() - defer rule.mtx.Unlock() - return rule.evaluationTimestamp + return rule.evaluationTimestamp.Load() } diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 92ecbc31d4d..d8a0ce72f9c 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -124,10 +124,14 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool + ExtraMetrics bool + NoDefaultPort bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. Should be false for Prometheus. 
PassMetadataInContext bool + // Option to enable the experimental in-memory metadata storage and append + // metadata to the WAL. + EnableMetadataStorage bool // Option to increase the interval used by scrape manager to throttle target groups updates. DiscoveryReloadInterval model.Duration @@ -207,7 +211,7 @@ func (m *Manager) reload() { level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) continue } - sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics, m.opts.PassMetadataInContext, m.opts.HTTPClientOptions) + sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts) if err != nil { level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) continue diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 2febb4390ae..a84e8cbea01 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -41,6 +41,7 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/timestamp" @@ -239,6 +240,8 @@ type scrapePool struct { // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop + + noDefaultPort bool } type labelLimits struct { @@ -264,13 +267,13 @@ const maxAheadTime = 10 * time.Minute type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics, passMetadataInContext bool, httpOpts []config_util.HTTPClientOption) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { targetScrapePools.Inc() if logger == nil { logger = log.NewNopLogger() } - client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, httpOpts...) + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) if err != nil { targetScrapePoolsFailed.Inc() return nil, errors.Wrap(err, "error creating HTTP client") @@ -287,7 +290,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed activeTargets: map[uint64]*Target{}, loops: map[uint64]loop{}, logger: logger, - httpOpts: httpOpts, + httpOpts: options.HTTPClientOptions, + noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
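With newScrapePool now taking the whole *Options struct, embedders configure the scrape manager through these fields. A hedged sketch of what a caller of scrape.NewManager at this Prometheus version might look like; the nil Appendable is only to keep the example short:

package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
)

func newScrapeManager(app storage.Appendable) *scrape.Manager {
	opts := &scrape.Options{
		// Mirrors the no-default-scrape-port behavior: discovered addresses
		// keep their original form instead of gaining :80 or :443.
		NoDefaultPort: true,
		// Experimental: keep scraped metadata in memory and append it to the WAL.
		EnableMetadataStorage: false,
	}
	return scrape.NewManager(opts, log.NewNopLogger(), app)
}

func main() {
	_ = newScrapeManager(nil) // a real caller would pass its storage.Appendable
}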
@@ -314,10 +318,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed opts.labelLimits, opts.interval, opts.timeout, - reportExtraMetrics, + options.ExtraMetrics, + options.EnableMetadataStorage, opts.target, - cache, - passMetadataInContext, + options.PassMetadataInContext, ) } @@ -426,8 +430,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { cache = newScrapeCache() } + t := sp.activeTargets[fp] + interval, timeout, err := t.intervalAndTimeout(interval, timeout) var ( - t = sp.activeTargets[fp] s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit} newLoop = sp.newLoop(scrapeLoopOptions{ target: t, @@ -442,6 +447,9 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { timeout: timeout, }) ) + if err != nil { + newLoop.setForcedError(err) + } wg.Add(1) go func(oldLoop, newLoop loop) { @@ -476,7 +484,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { var all []*Target sp.droppedTargets = []*Target{} for _, tg := range tgs { - targets, failures := TargetsFromGroup(tg, sp.config) + targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort) for _, err := range failures { level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) } @@ -669,7 +677,7 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re } } - res := lb.Labels() + res := lb.Labels(nil) if len(rc) > 0 { res = relabel.Process(res, rc...) @@ -709,7 +717,7 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels lb.Set(l.Name, l.Value) } - return lb.Labels() + return lb.Labels(nil) } // appender returns an appender for ingested samples from the target. @@ -866,7 +874,8 @@ type scrapeLoop struct { disabledEndOfRunStalenessMarkers bool - reportExtraMetrics bool + reportExtraMetrics bool + appendMetadataToWAL bool } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -899,15 +908,15 @@ type scrapeCache struct { // metaEntry holds meta information about a metric. type metaEntry struct { - lastIter uint64 // Last scrape iteration the entry was observed at. - typ textparse.MetricType - help string - unit string + metadata.Metadata + + lastIter uint64 // Last scrape iteration the entry was observed at. + lastIterChange uint64 // Last scrape iteration the entry was changed at. } func (m *metaEntry) size() int { // The attribute lastIter although part of the struct it is not metadata. 
- return len(m.help) + len(m.unit) + len(m.typ) + return len(m.Help) + len(m.Unit) + len(m.Type) } func newScrapeCache() *scrapeCache { @@ -1020,10 +1029,13 @@ func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) { e, ok := c.metadata[yoloString(metric)] if !ok { - e = &metaEntry{typ: textparse.MetricTypeUnknown} + e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} c.metadata[string(metric)] = e } - e.typ = t + if e.Type != t { + e.Type = t + e.lastIterChange = c.iter + } e.lastIter = c.iter c.metaMtx.Unlock() @@ -1034,11 +1046,12 @@ func (c *scrapeCache) setHelp(metric, help []byte) { e, ok := c.metadata[yoloString(metric)] if !ok { - e = &metaEntry{typ: textparse.MetricTypeUnknown} + e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} c.metadata[string(metric)] = e } - if e.help != yoloString(help) { - e.help = string(help) + if e.Help != yoloString(help) { + e.Help = string(help) + e.lastIterChange = c.iter } e.lastIter = c.iter @@ -1050,11 +1063,12 @@ func (c *scrapeCache) setUnit(metric, unit []byte) { e, ok := c.metadata[yoloString(metric)] if !ok { - e = &metaEntry{typ: textparse.MetricTypeUnknown} + e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}} c.metadata[string(metric)] = e } - if e.unit != yoloString(unit) { - e.unit = string(unit) + if e.Unit != yoloString(unit) { + e.Unit = string(unit) + e.lastIterChange = c.iter } e.lastIter = c.iter @@ -1071,9 +1085,9 @@ func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) { } return MetricMetadata{ Metric: metric, - Type: m.typ, - Help: m.help, - Unit: m.unit, + Type: m.Type, + Help: m.Help, + Unit: m.Unit, }, true } @@ -1086,9 +1100,9 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata { for m, e := range c.metadata { res = append(res, MetricMetadata{ Metric: m, - Type: e.typ, - Help: e.help, - Unit: e.unit, + Type: e.Type, + Help: e.Help, + Unit: e.Unit, }) } return res @@ -1128,8 +1142,8 @@ func newScrapeLoop(ctx context.Context, interval time.Duration, timeout time.Duration, reportExtraMetrics bool, + appendMetadataToWAL bool, target *Target, - metricMetadataStore MetricMetadataStore, passMetadataInContext bool, ) *scrapeLoop { if l == nil { @@ -1171,6 +1185,7 @@ func newScrapeLoop(ctx context.Context, interval: interval, timeout: timeout, reportExtraMetrics: reportExtraMetrics, + appendMetadataToWAL: appendMetadataToWAL, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1450,12 +1465,36 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, } var ( - defTime = timestamp.FromTime(ts) - appErrs = appendErrors{} - sampleLimitErr error - e exemplar.Exemplar // escapes to heap so hoisted out of loop + defTime = timestamp.FromTime(ts) + appErrs = appendErrors{} + sampleLimitErr error + e exemplar.Exemplar // escapes to heap so hoisted out of loop + meta metadata.Metadata + metadataChanged bool ) + // updateMetadata updates the current iteration's metadata object and the + // metadataChanged value if we have metadata in the scrape cache AND the + // labelset is for a new series or the metadata for this series has just + // changed. It returns a boolean based on whether the metadata was updated. 
+ updateMetadata := func(lset labels.Labels, isNewSeries bool) bool { + if !sl.appendMetadataToWAL { + return false + } + + sl.cache.metaMtx.Lock() + defer sl.cache.metaMtx.Unlock() + metaEntry, metaOk := sl.cache.metadata[yoloString([]byte(lset.Get(labels.MetricName)))] + if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) { + metadataChanged = true + meta.Type = metaEntry.Type + meta.Unit = metaEntry.Unit + meta.Help = metaEntry.Help + return true + } + return false + } + // Take an appender with limits. app = appender(app, sl.sampleLimit) @@ -1505,6 +1544,10 @@ loop: t = *tp } + // Zero metadata out for current iteration until it's resolved. + meta = metadata.Metadata{} + metadataChanged = false + if sl.cache.getDropped(yoloString(met)) { continue } @@ -1519,6 +1562,9 @@ loop: if ok { ref = ce.ref lset = ce.lset + + // Update metadata only if it changed in the current iteration. + updateMetadata(lset, false) } else { mets = p.Metric(&lset) hash = lset.Hash() @@ -1543,6 +1589,9 @@ loop: targetScrapePoolExceededLabelLimits.Inc() break loop } + + // Append metadata for new series if they were present. + updateMetadata(lset, true) } ref, err = app.Append(ref, lset, t, v) @@ -1582,6 +1631,12 @@ loop: e = exemplar.Exemplar{} // reset for next time round loop } + if sl.appendMetadataToWAL && metadataChanged { + if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { + // No need to fail the scrape on errors appending metadata. + level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + } + } } if sampleLimitErr != nil { if err == nil { diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index e017a458419..f546239542c 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -351,7 +351,7 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { +func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -374,7 +374,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab } } - preRelabelLabels := lb.Labels() + preRelabelLabels := lb.Labels(nil) lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...) // Check if the target was dropped. @@ -389,21 +389,25 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab // addPort checks whether we should add a default port to the address. // If the address is not valid, we don't append a port either. - addPort := func(s string) bool { + addPort := func(s string) (string, string, bool) { // If we can split, a port exists and we don't have to add one. 
- if _, _, err := net.SplitHostPort(s); err == nil { - return false + if host, port, err := net.SplitHostPort(s); err == nil { + return host, port, false } // If adding a port makes it valid, the previous error // was not due to an invalid address and we can append a port. _, _, err := net.SplitHostPort(s + ":1234") - return err == nil + return "", "", err == nil } + addr := lset.Get(model.AddressLabel) - // If it's an address with no trailing port, infer it based on the used scheme. - if addPort(addr) { + scheme := lset.Get(model.SchemeLabel) + host, port, add := addPort(addr) + // If it's an address with no trailing port, infer it based on the used scheme + // unless the no-default-scrape-port feature flag is present. + if !noDefaultPort && add { // Addresses reaching this point are already wrapped in [] if necessary. - switch lset.Get(model.SchemeLabel) { + switch scheme { case "http", "": addr = addr + ":80" case "https": @@ -414,6 +418,21 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab lb.Set(model.AddressLabel, addr) } + if noDefaultPort { + // If it's an address with a trailing default port and the + // no-default-scrape-port flag is present, remove the port. + switch port { + case "80": + if scheme == "http" { + lb.Set(model.AddressLabel, host) + } + case "443": + if scheme == "https" { + lb.Set(model.AddressLabel, host) + } + } + } + if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } @@ -453,7 +472,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab lb.Set(model.InstanceLabel, addr) } - res = lb.Labels() + res = lb.Labels(nil) for _, l := range res { // Check label values are valid, drop the target if not. if !model.LabelValue(l.Value).IsValid() { @@ -464,7 +483,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab } // TargetsFromGroup builds targets based on the given TargetGroup and config. -func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool) ([]*Target, []error) { targets := make([]*Target, 0, len(tg.Targets)) failures := []error{} @@ -482,7 +501,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe lset := labels.New(lbls...) 
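The addPort rewrite above now returns the split host and port, so the caller can both add a missing default port and, when the new noDefaultPort flag is set, strip a redundant one. A standalone sketch of the address check itself, using only net.SplitHostPort:

package main

import (
	"fmt"
	"net"
)

// splitOrAdd reports the host/port of an address and whether it has no port
// yet (in which case a scheme default would normally be appended).
func splitOrAdd(addr string) (host, port string, add bool) {
	if h, p, err := net.SplitHostPort(addr); err == nil {
		return h, p, false
	}
	// If appending a port makes the address valid, the only problem was the
	// missing port, not a malformed address.
	_, _, err := net.SplitHostPort(addr + ":1234")
	return "", "", err == nil
}

func main() {
	fmt.Println(splitOrAdd("example.com"))      // "" "" true: a default :80/:443 would be added
	fmt.Println(splitOrAdd("example.com:9090")) // "example.com" "9090" false
	fmt.Println(splitOrAdd("[::1]:9100"))       // "::1" "9100" false
}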
- lbls, origLabels, err := PopulateLabels(lset, cfg) + lbls, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort) if err != nil { failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) } diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 2d0d33215fb..a51526f06e1 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" ) @@ -172,6 +173,20 @@ func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exempl return ref, nil } +func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { + ref, err := f.primary.UpdateMetadata(ref, l, m) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.UpdateMetadata(ref, l, m); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) Commit() (err error) { err = f.primary.Commit() diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 6ce147b08af..d73ec72203a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -20,16 +20,22 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" ) // The errors exposed. var ( - ErrNotFound = errors.New("not found") - ErrOutOfOrderSample = errors.New("out of order sample") + ErrNotFound = errors.New("not found") + // ErrOutOfOrderSample is when out of order support is disabled and the sample is out of order. + ErrOutOfOrderSample = errors.New("out of order sample") + // ErrOutOfBounds is when out of order support is disabled and the sample is older than the min valid time for the append. + ErrOutOfBounds = errors.New("out of bounds") + // ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed. + ErrTooOldSample = errors.New("too old sample") + // ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value. ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") - ErrOutOfBounds = errors.New("out of bounds") ErrOutOfOrderExemplar = errors.New("out of order exemplar") ErrDuplicateExemplar = errors.New("duplicate exemplar") ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) @@ -222,6 +228,7 @@ type Appender interface { // Appender has to be discarded after rollback. Rollback() error ExemplarAppender + MetadataUpdater } // GetRef is an extra interface on Appenders used by downstream projects @@ -250,6 +257,18 @@ type ExemplarAppender interface { AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error) } +// MetadataUpdater provides an interface for associating metadata to stored series. 
+type MetadataUpdater interface { + // UpdateMetadata updates a metadata entry for the given series and labels. + // A series reference number is returned which can be used to modify the + // metadata of the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to UpdateMetadata() at any point. If the series does not exist, + // UpdateMetadata returns an error. + // If the reference is 0 it must not be used for caching. + UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) +} + // SeriesSet contains a set of series. type SeriesSet interface { Next() bool diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index f7246c7c8ee..2f175d3e7ec 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -16,12 +16,11 @@ package storage import ( "bytes" "container/heap" + "fmt" "math" "sort" "sync" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -159,7 +158,7 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ func (q *mergeGenericQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { res, ws, err := q.lvals(q.queriers, name, matchers...) if err != nil { - return nil, nil, errors.Wrapf(err, "LabelValues() from merge generic querier for label %s", name) + return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) } return res, ws, nil } @@ -227,7 +226,7 @@ func (q *mergeGenericQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, warnings = append(warnings, wrn...) } if err != nil { - return nil, nil, errors.Wrap(err, "LabelNames() from merge generic querier") + return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) } for _, name := range names { labelNamesMap[name] = struct{}{} @@ -718,3 +717,56 @@ func (h *chunkIteratorHeap) Pop() interface{} { *h = old[0 : n-1] return x } + +// NewConcatenatingChunkSeriesMerger returns a VerticalChunkSeriesMergeFunc that simply concatenates the +// chunks from the series. The resultant stream of chunks for a series might be overlapping and unsorted. 
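Because MetadataUpdater is now embedded in storage.Appender, every appender implementation has to provide UpdateMetadata, even if only as a no-op (as remote write's timestampTracker does later in this diff). A minimal sketch of such an implementation against the new interface:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/storage"
)

// noopMetadataUpdater satisfies the new interface by accepting and dropping
// metadata updates, the minimal obligation placed on appenders.
type noopMetadataUpdater struct{}

func (noopMetadataUpdater) UpdateMetadata(ref storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
	return ref, nil
}

var _ storage.MetadataUpdater = noopMetadataUpdater{}

func main() {
	ref, err := noopMetadataUpdater{}.UpdateMetadata(0,
		labels.FromStrings("__name__", "up"),
		metadata.Metadata{Help: "1 if the target is up"})
	fmt.Println(ref, err) // 0 <nil>
}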
+func NewConcatenatingChunkSeriesMerger() VerticalChunkSeriesMergeFunc { + return func(series ...ChunkSeries) ChunkSeries { + if len(series) == 0 { + return nil + } + return &ChunkSeriesEntry{ + Lset: series[0].Labels(), + ChunkIteratorFn: func() chunks.Iterator { + iterators := make([]chunks.Iterator, 0, len(series)) + for _, s := range series { + iterators = append(iterators, s.Iterator()) + } + return &concatenatingChunkIterator{ + iterators: iterators, + } + }, + } + } +} + +type concatenatingChunkIterator struct { + iterators []chunks.Iterator + idx int + + curr chunks.Meta +} + +func (c *concatenatingChunkIterator) At() chunks.Meta { + return c.curr +} + +func (c *concatenatingChunkIterator) Next() bool { + if c.idx >= len(c.iterators) { + return false + } + if c.iterators[c.idx].Next() { + c.curr = c.iterators[c.idx].At() + return true + } + c.idx++ + return c.Next() +} + +func (c *concatenatingChunkIterator) Err() error { + errs := tsdb_errors.NewMulti() + for _, iter := range c.iterators { + errs.Add(iter.Err()) + } + return errs.Err() +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go b/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go index 670a16834f1..96ce483e0c8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go @@ -16,13 +16,14 @@ package remote import ( "bufio" "encoding/binary" + "errors" + "fmt" "hash" "hash/crc32" "io" "net/http" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" ) // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows. @@ -119,7 +120,7 @@ func (r *ChunkedReader) Next() ([]byte, error) { } if size > r.sizeLimit { - return nil, errors.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size) + return nil, fmt.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size) } if cap(r.data) < int(size) { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 7120822d95a..92666cd1d1c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -26,7 +26,6 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -222,7 +221,7 @@ func (c *Client) Store(ctx context.Context, req []byte) error { if scanner.Scan() { line = scanner.Text() } - err = errors.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) + err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) } if httpResp.StatusCode/100 == 5 { return RecoverableError{err, defaultBackoff} @@ -273,13 +272,13 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe } data, err := proto.Marshal(req) if err != nil { - return nil, errors.Wrapf(err, "unable to marshal read request") + return nil, fmt.Errorf("unable to marshal read request: %w", err) } compressed := snappy.Encode(nil, data) httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed)) if err != nil { - return nil, errors.Wrap(err, "unable to create request") + return nil, fmt.Errorf("unable to create request: %w", err) } 
httpReq.Header.Add("Content-Encoding", "snappy") httpReq.Header.Add("Accept-Encoding", "snappy") @@ -296,7 +295,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe start := time.Now() httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) if err != nil { - return nil, errors.Wrap(err, "error sending request") + return nil, fmt.Errorf("error sending request: %w", err) } defer func() { io.Copy(io.Discard, httpResp.Body) @@ -307,26 +306,26 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe compressed, err = io.ReadAll(httpResp.Body) if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("error reading response. HTTP status code: %s", httpResp.Status)) + return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err) } if httpResp.StatusCode/100 != 2 { - return nil, errors.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed))) + return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed))) } uncompressed, err := snappy.Decode(nil, compressed) if err != nil { - return nil, errors.Wrap(err, "error reading response") + return nil, fmt.Errorf("error reading response: %w", err) } var resp prompb.ReadResponse err = proto.Unmarshal(uncompressed, &resp) if err != nil { - return nil, errors.Wrap(err, "unable to unmarshal response body") + return nil, fmt.Errorf("unable to unmarshal response body: %w", err) } if len(resp.Results) != len(req.Queries) { - return nil, errors.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results)) + return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results)) } return resp.Results[0], nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index f56fc6760f0..5d78107bb96 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -14,6 +14,7 @@ package remote import ( + "errors" "fmt" "io" "net/http" @@ -22,7 +23,6 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -180,7 +180,7 @@ func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.R return resType, nil } } - return 0, errors.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported) + return 0, fmt.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported) } // StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller. @@ -214,7 +214,7 @@ func StreamChunkedReadResponses( chk := iter.At() if chk.Chunk == nil { - return ss.Warnings(), errors.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref) + return ss.Warnings(), fmt.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref) } // Cut the chunk. 
@@ -239,11 +239,11 @@ func StreamChunkedReadResponses( QueryIndex: queryIndex, }) if err != nil { - return ss.Warnings(), errors.Wrap(err, "marshal ChunkedReadResponse") + return ss.Warnings(), fmt.Errorf("marshal ChunkedReadResponse: %w", err) } if _, err := stream.Write(b); err != nil { - return ss.Warnings(), errors.Wrap(err, "write to stream") + return ss.Warnings(), fmt.Errorf("write to stream: %w", err) } chks = chks[:0] } @@ -395,16 +395,16 @@ func (c *concreteSeriesIterator) Err() error { func validateLabelsAndMetricName(ls labels.Labels) error { for i, l := range ls { if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return errors.Errorf("invalid metric name: %v", l.Value) + return fmt.Errorf("invalid metric name: %v", l.Value) } if !model.LabelName(l.Name).IsValid() { - return errors.Errorf("invalid label name: %v", l.Name) + return fmt.Errorf("invalid label name: %v", l.Name) } if !model.LabelValue(l.Value).IsValid() { - return errors.Errorf("invalid label value: %v", l.Value) + return fmt.Errorf("invalid label value: %v", l.Value) } if i > 0 && l.Name == ls[i-1].Name { - return errors.Errorf("duplicate label with name: %v", l.Name) + return fmt.Errorf("duplicate label with name: %v", l.Name) } } return nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index eee36463b10..21de565ed92 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -15,11 +15,11 @@ package remote import ( "context" + "errors" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/scrape" diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go index 07176359379..154eb73f9f2 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -15,8 +15,8 @@ package remote import ( "context" - - "github.com/pkg/errors" + "errors" + "fmt" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -164,12 +164,12 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers . 
m, added := q.addExternalLabels(matchers) query, err := ToQuery(q.mint, q.maxt, m, hints) if err != nil { - return storage.ErrSeriesSet(errors.Wrap(err, "toQuery")) + return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err)) } res, err := q.client.Read(q.ctx, query) if err != nil { - return storage.ErrSeriesSet(errors.Wrap(err, "remote_read")) + return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err)) } return newSeriesSetFilter(FromQueryResult(sortSeries, res), added) } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index 4de015e7396..88892e62e3c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -120,7 +120,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error { externalLabels := conf.GlobalConfig.ExternalLabels if !rrConf.FilterExternalLabels { - externalLabels = make(labels.Labels, 0) + externalLabels = labels.EmptyLabels() } queryables = append(queryables, NewSampleAndChunkQueryableClient( c, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index a7399ee5325..be401e95d4a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/wal" ) @@ -268,6 +269,12 @@ func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, return 0, nil } +func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. + // UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now. + return 0, nil +} + // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { t.writeStorage.samplesIn.incr(t.samples + t.exemplars) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 1807543ff26..64936222393 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -15,12 +15,12 @@ package remote import ( "context" + "errors" "fmt" "net/http" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/prompb" @@ -67,10 +67,11 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // checkAppendExemplarError modifies the AppendExamplar's returned error based on the error cause. 
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error { - switch errors.Cause(err) { - case storage.ErrNotFound: + unwrapedErr := errors.Unwrap(err) + switch { + case errors.Is(unwrapedErr, storage.ErrNotFound): return storage.ErrNotFound - case storage.ErrOutOfOrderExemplar: + case errors.Is(unwrapedErr, storage.ErrOutOfOrderExemplar): *outOfOrderErrs++ level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) return nil @@ -97,8 +98,8 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, s := range ts.Samples { _, err = app.Append(0, labels, s.Timestamp, s.Value) if err != nil { - switch errors.Cause(err) { - case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: + unwrapedErr := errors.Unwrap(err) + if errors.Is(unwrapedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrapedErr, storage.ErrOutOfBounds) || errors.Is(unwrapedErr, storage.ErrDuplicateSampleForTimestamp) { level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 0b1659ce029..d5f80006fc2 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -45,6 +45,8 @@ var ( Name: "prometheus_template_text_expansions_total", Help: "The total number of template text expansions.", }) + + errNaNOrInf = errors.New("value is NaN or Inf") ) func init() { @@ -315,15 +317,24 @@ func NewTemplateExpander( if err != nil { return "", err } - if math.IsNaN(v) || math.IsInf(v, 0) { + + tm, err := floatToTime(v) + switch { + case errors.Is(err, errNaNOrInf): return fmt.Sprintf("%.4g", v), nil + case err != nil: + return "", err } - timestamp := v * 1e9 - if timestamp > math.MaxInt64 || timestamp < math.MinInt64 { - return "", fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v) + + return fmt.Sprint(tm), nil + }, + "toTime": func(i interface{}) (*time.Time, error) { + v, err := convertToFloat(i) + if err != nil { + return nil, err } - t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC() - return fmt.Sprint(t), nil + + return floatToTime(v) }, "pathPrefix": func() string { return externalURL.Path @@ -446,3 +457,15 @@ func (te Expander) ParseTest() error { } return nil } + +func floatToTime(v float64) (*time.Time, error) { + if math.IsNaN(v) || math.IsInf(v, 0) { + return nil, errNaNOrInf + } + timestamp := v * 1e9 + if timestamp > math.MaxInt64 || timestamp < math.MinInt64 { + return nil, fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v) + } + t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC() + return &t, nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 6b8b65dda7a..8fd1066ba2d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -116,7 +116,7 @@ type ChunkWriter interface { // ChunkReader provides reading access of serialized time series data. type ChunkReader interface { // Chunk returns the series data chunk with the given reference. 
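The template change above extracts the NaN/Inf and int64-overflow checks into a floatToTime helper shared by humanizeTimestamp and the new toTime function. An approximate standalone version using only the standard library (the vendored code goes through common/model's TimeFromUnixNano, which truncates to millisecond precision):

package main

import (
	"errors"
	"fmt"
	"math"
	"time"
)

var errNaNOrInf = errors.New("value is NaN or Inf")

// floatToTime converts a Unix timestamp in (possibly fractional) seconds to a
// UTC time, rejecting NaN/Inf and values that overflow int64 nanoseconds.
func floatToTime(v float64) (*time.Time, error) {
	if math.IsNaN(v) || math.IsInf(v, 0) {
		return nil, errNaNOrInf
	}
	ns := v * 1e9
	if ns > math.MaxInt64 || ns < math.MinInt64 {
		return nil, fmt.Errorf("%v cannot be represented as a nanoseconds timestamp since it overflows int64", v)
	}
	t := time.Unix(0, int64(ns)).UTC()
	return &t, nil
}

func main() {
	if t, err := floatToTime(1.6648128e9); err == nil {
		fmt.Println(t) // the corresponding UTC time
	}
	_, err := floatToTime(math.Inf(1))
	fmt.Println(err) // value is NaN or Inf
}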
- Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) + Chunk(meta chunks.Meta) (chunkenc.Chunk, error) // Close releases all underlying resources of the reader. Close() error @@ -189,12 +189,39 @@ type BlockMetaCompaction struct { // this block. Parents []BlockDesc `json:"parents,omitempty"` Failed bool `json:"failed,omitempty"` + // Additional information about the compaction, for example, block created from out-of-order chunks. + Hints []string `json:"hints,omitempty"` +} + +func (bm *BlockMetaCompaction) SetOutOfOrder() { + if bm.containsHint(CompactionHintFromOutOfOrder) { + return + } + bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder) + sort.Strings(bm.Hints) +} + +func (bm *BlockMetaCompaction) FromOutOfOrder() bool { + return bm.containsHint(CompactionHintFromOutOfOrder) +} + +func (bm *BlockMetaCompaction) containsHint(hint string) bool { + for _, h := range bm.Hints { + if h == hint { + return true + } + } + return false } const ( indexFilename = "index" metaFilename = "meta.json" metaVersion1 = 1 + + // CompactionHintFromOutOfOrder is a hint noting that the block + // was created from out-of-order chunks. + CompactionHintFromOutOfOrder = "from-out-of-order" ) func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 09b355368db..4db30799752 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -39,7 +39,7 @@ type BlockWriter struct { } // ErrNoSeriesAppended is returned if the series count is zero while flushing blocks. -var ErrNoSeriesAppended error = errors.New("no series appended, aborting") +var ErrNoSeriesAppended = errors.New("no series appended, aborting") // NewBlockWriter create a new block writer. // @@ -71,7 +71,7 @@ func (w *BlockWriter) initHead() error { opts := DefaultHeadOptions() opts.ChunkRange = w.blockSize opts.ChunkDirRoot = w.chunkDir - h, err := NewHead(nil, w.logger, nil, opts, NewHeadStats()) + h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats()) if err != nil { return errors.Wrap(err, "tsdb.NewHead") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go index 833c9794b6b..60531023ba3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go @@ -119,11 +119,18 @@ type bstreamReader struct { buffer uint64 // The current buffer, filled from the stream, containing up to 8 bytes from which read bits. valid uint8 // The number of right-most bits valid to read (from left) in the current 8 byte buffer. + last byte // A copy of the last byte of the stream. } func newBReader(b []byte) bstreamReader { + // The last byte of the stream can be updated later, so we take a copy. + var last byte + if len(b) > 0 { + last = b[len(b)-1] + } return bstreamReader{ stream: b, + last: last, } } @@ -223,17 +230,24 @@ func (b *bstreamReader) loadNextBuffer(nbits uint8) bool { return true } - // We're here if the are 8 or less bytes left in the stream. Since this reader needs - // to handle race conditions with concurrent writes happening on the very last byte - // we make sure to never over more than the minimum requested bits (rounded up to - // the next byte). The following code is slower but called less frequently. 
+ // We're here if there are 8 or less bytes left in the stream. + // The following code is slower but called less frequently. nbytes := int((nbits / 8) + 1) if b.streamOffset+nbytes > len(b.stream) { nbytes = len(b.stream) - b.streamOffset } buffer := uint64(0) - for i := 0; i < nbytes; i++ { + skip := 0 + if b.streamOffset+nbytes == len(b.stream) { + // There can be concurrent writes happening on the very last byte + // of the stream, so use the copy we took at initialization time. + buffer = buffer | uint64(b.last) + // Read up to the byte before + skip = 1 + } + + for i := 0; i < nbytes-skip; i++ { buffer = buffer | (uint64(b.stream[b.streamOffset+i]) << uint(8*(nbytes-i-1))) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index bffb7e75aba..c5f8036a715 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -39,6 +39,21 @@ const ( EncXOR ) +// Chunk encodings for out-of-order chunks. +// These encodings must be only used by the Head block for its internal bookkeeping. +const ( + OutOfOrderMask = 0b10000000 + EncOOOXOR = EncXOR | OutOfOrderMask +) + +func IsOutOfOrderChunk(e Encoding) bool { + return (e & OutOfOrderMask) != 0 +} + +func IsValidEncoding(e Encoding) bool { + return e == EncXOR || e == EncOOOXOR +} + // Chunk holds a sequence of sample pairs that can be iterated over and appended to. type Chunk interface { // Bytes returns the underlying byte slice of the chunk. @@ -155,7 +170,7 @@ func NewPool() Pool { func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { switch e { - case EncXOR: + case EncXOR, EncOOOXOR: c := p.xor.Get().(*XORChunk) c.b.stream = b c.b.count = 0 @@ -166,7 +181,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { func (p *pool) Put(c Chunk) error { switch c.Encoding() { - case EncXOR: + case EncXOR, EncOOOXOR: xc, ok := c.(*XORChunk) // This may happen often with wrapped chunks. Nothing we can really do about // it but returning an error would cause a lot of allocations again. Thus, @@ -188,7 +203,7 @@ func (p *pool) Put(c Chunk) error { // bytes. func FromData(e Encoding, d []byte) (Chunk, error) { switch e { - case EncXOR: + case EncXOR, EncOOOXOR: return &XORChunk{b: bstream{count: 0, stream: d}}, nil } return nil, errors.Errorf("invalid chunk encoding %q", e) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go index ba00a6e8118..3429c2c606a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -88,6 +88,8 @@ func (c *XORChunk) Compact() { } // Appender implements the Chunk interface. +// It is not valid to call Appender() multiple times concurrently or to use multiple +// Appenders on the same chunk. func (c *XORChunk) Appender() (Appender, error) { it := c.iterator(nil) @@ -115,9 +117,6 @@ func (c *XORChunk) Appender() (Appender, error) { } func (c *XORChunk) iterator(it Iterator) *xorIterator { - // Should iterators guarantee to act on a copy of the data so it doesn't lock append? - // When using striped locks to guard access to chunks, probably yes. - // Could only copy data if the chunk is not completed yet. 
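The chunkenc change in this area reserves a high bit of the encoding byte for the Head's internal out-of-order chunks, so EncOOOXOR is just EncXOR with the mask set and a single bit test distinguishes the two. A small usage sketch against the helpers this diff adds:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// EncOOOXOR = EncXOR | OutOfOrderMask, so the mask test tells the Head's
	// internal out-of-order chunks apart from regular in-order XOR chunks.
	fmt.Println(chunkenc.IsOutOfOrderChunk(chunkenc.EncXOR))    // false
	fmt.Println(chunkenc.IsOutOfOrderChunk(chunkenc.EncOOOXOR)) // true
	fmt.Println(chunkenc.IsValidEncoding(chunkenc.EncOOOXOR))   // true
}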
if xorIter, ok := it.(*xorIterator); ok { xorIter.Reset(c.b.bytes()) return xorIter @@ -132,6 +131,9 @@ func (c *XORChunk) iterator(it Iterator) *xorIterator { } // Iterator implements the Chunk interface. +// Iterator() must not be called concurrently with any modifications to the chunk, +// but after it returns you can use an Iterator concurrently with an Appender or +// other Iterators. func (c *XORChunk) Iterator(it Iterator) Iterator { return c.iterator(it) } @@ -457,3 +459,12 @@ func (it *xorIterator) readValue() bool { it.numRead++ return true } + +// OOOXORChunk holds a XORChunk and overrides the Encoding() method. +type OOOXORChunk struct { + *XORChunk +} + +func (c *OOOXORChunk) Encoding() Encoding { + return EncOOOXOR +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go index 628880b4c4a..ab34eb06c7d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go @@ -16,12 +16,25 @@ package chunks import ( "errors" "sync" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb/chunkenc" ) +const ( + // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again. + chunkRefMapShrinkThreshold = 1000 + + // Minimum interval between shrinking of chunkWriteQueue.chunkRefMap. + chunkRefMapMinShrinkInterval = 10 * time.Minute + + // Maximum size of segment used by job queue (number of elements). With chunkWriteJob being 64 bytes, + // this will use ~512 KiB for empty queue. + maxChunkQueueSegmentSize = 8192 +) + type chunkWriteJob struct { cutFile bool seriesRef HeadSeriesRef @@ -36,23 +49,30 @@ type chunkWriteJob struct { // Chunks that shall be written get added to the queue, which is consumed asynchronously. // Adding jobs to the queue is non-blocking as long as the queue isn't full. type chunkWriteQueue struct { - jobs chan chunkWriteJob + jobs *writeJobQueue - chunkRefMapMtx sync.RWMutex - chunkRefMap map[ChunkDiskMapperRef]chunkenc.Chunk + chunkRefMapMtx sync.RWMutex + chunkRefMap map[ChunkDiskMapperRef]chunkenc.Chunk + chunkRefMapPeakSize int // Largest size that chunkRefMap has grown to since the last time we shrank it. + chunkRefMapLastShrink time.Time // When the chunkRefMap has been shrunk the last time. - isRunningMtx sync.Mutex // Protects the isRunning property. - isRunning bool // Used to prevent that new jobs get added to the queue when the chan is already closed. + // isRunningMtx serves two purposes: + // 1. It protects isRunning field. + // 2. It serializes adding of jobs to the chunkRefMap in addJob() method. If jobs channel is full then addJob() will block + // while holding this mutex, which guarantees that chunkRefMap won't ever grow beyond the queue size + 1. + isRunningMtx sync.Mutex + isRunning bool // Used to prevent that new jobs get added to the queue when the chan is already closed. workerWg sync.WaitGroup writeChunk writeChunkF - // Keeping three separate counters instead of only a single CounterVec to improve the performance of the critical + // Keeping separate counters instead of only a single CounterVec to improve the performance of the critical // addJob() method which otherwise would need to perform a WithLabelValues call on the CounterVec. 
adds prometheus.Counter gets prometheus.Counter completed prometheus.Counter + shrink prometheus.Counter } // writeChunkF is a function which writes chunks, it is dynamic to allow mocking in tests. @@ -67,14 +87,21 @@ func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChu []string{"operation"}, ) + segmentSize := size + if segmentSize > maxChunkQueueSegmentSize { + segmentSize = maxChunkQueueSegmentSize + } + q := &chunkWriteQueue{ - jobs: make(chan chunkWriteJob, size), - chunkRefMap: make(map[ChunkDiskMapperRef]chunkenc.Chunk, size), - writeChunk: writeChunk, + jobs: newWriteJobQueue(size, segmentSize), + chunkRefMap: make(map[ChunkDiskMapperRef]chunkenc.Chunk), + chunkRefMapLastShrink: time.Now(), + writeChunk: writeChunk, adds: counters.WithLabelValues("add"), gets: counters.WithLabelValues("get"), completed: counters.WithLabelValues("complete"), + shrink: counters.WithLabelValues("shrink"), } if reg != nil { @@ -90,7 +117,12 @@ func (c *chunkWriteQueue) start() { go func() { defer c.workerWg.Done() - for job := range c.jobs { + for { + job, ok := c.jobs.pop() + if !ok { + return + } + c.processJob(job) } }() @@ -112,6 +144,42 @@ func (c *chunkWriteQueue) processJob(job chunkWriteJob) { delete(c.chunkRefMap, job.ref) c.completed.Inc() + + c.shrinkChunkRefMap() +} + +// shrinkChunkRefMap checks whether the conditions to shrink the chunkRefMap are met, +// if so chunkRefMap is reinitialized. The chunkRefMapMtx must be held when calling this method. +// +// We do this because Go runtime doesn't release internal memory used by map after map has been emptied. +// To achieve that we create new map instead and throw the old one away. +func (c *chunkWriteQueue) shrinkChunkRefMap() { + if len(c.chunkRefMap) > 0 { + // Can't shrink it while there is data in it. + return + } + + if c.chunkRefMapPeakSize < chunkRefMapShrinkThreshold { + // Not shrinking it because it has not grown to the minimum threshold yet. + return + } + + now := time.Now() + + if now.Sub(c.chunkRefMapLastShrink) < chunkRefMapMinShrinkInterval { + // Not shrinking it because the minimum duration between shrink-events has not passed yet. + return + } + + // Re-initialize the chunk ref map to half of the peak size that it has grown to since the last re-init event. + // We are trying to hit the sweet spot in the trade-off between initializing it to a very small size + // potentially resulting in many allocations to re-grow it, and initializing it to a large size potentially + // resulting in unused allocated memory. + c.chunkRefMap = make(map[ChunkDiskMapperRef]chunkenc.Chunk, c.chunkRefMapPeakSize/2) + + c.chunkRefMapPeakSize = 0 + c.chunkRefMapLastShrink = now + c.shrink.Inc() } func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) { @@ -125,14 +193,25 @@ func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) { defer c.isRunningMtx.Unlock() if !c.isRunning { - return errors.New("queue is not started") + return errors.New("queue is not running") } c.chunkRefMapMtx.Lock() c.chunkRefMap[job.ref] = job.chk + + // Keep track of the peak usage of c.chunkRefMap. 
+ if len(c.chunkRefMap) > c.chunkRefMapPeakSize { + c.chunkRefMapPeakSize = len(c.chunkRefMap) + } c.chunkRefMapMtx.Unlock() - c.jobs <- job + if ok := c.jobs.push(job); !ok { + c.chunkRefMapMtx.Lock() + delete(c.chunkRefMap, job.ref) + c.chunkRefMapMtx.Unlock() + + return errors.New("queue is closed") + } return nil } @@ -159,7 +238,7 @@ func (c *chunkWriteQueue) stop() { c.isRunning = false - close(c.jobs) + c.jobs.close() c.workerWg.Wait() } @@ -171,7 +250,7 @@ func (c *chunkWriteQueue) queueIsEmpty() bool { func (c *chunkWriteQueue) queueIsFull() bool { // When the queue is full and blocked on the writer the chunkRefMap has one more job than the cap of the jobCh // because one job is currently being processed and blocked in the writer. - return c.queueSize() == cap(c.jobs)+1 + return c.queueSize() == c.jobs.maxSize+1 } func (c *chunkWriteQueue) queueSize() int { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index 72cb0311bc0..6d04998e803 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -81,9 +81,10 @@ func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) { // HeadChunkID refers to a specific chunk in a series (memSeries) in the Head. // Each memSeries has its own monotonically increasing number to refer to its chunks. // If the HeadChunkID value is... -// * memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk. -// * less than the above, but >= memSeries.firstID, then it's -// memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstID. +// - memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk. +// - less than the above, but >= memSeries.firstID, then it's +// memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstID. +// // Example: // assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9]. // | HeadChunkID value | refers to ... | @@ -120,6 +121,15 @@ type Meta struct { // Time range the data covers. // When MaxTime == math.MaxInt64 the chunk is still open and being appended to. MinTime, MaxTime int64 + + // OOOLastRef, OOOLastMinTime and OOOLastMaxTime are kept as markers for + // overlapping chunks. + // These fields point to the last created out of order Chunk (the head) that existed + // when Series() was called and was overlapping. + // Series() and Chunk() method responses should be consistent for the same + // query even if new data is added in between the calls. + OOOLastRef ChunkRef + OOOLastMinTime, OOOLastMaxTime int64 } // Iterator iterates over the chunks of a single time series. @@ -555,8 +565,8 @@ func (s *Reader) Size() int64 { } // Chunk returns a chunk from a given reference. 
-func (s *Reader) Chunk(ref ChunkRef) (chunkenc.Chunk, error) { - sgmIndex, chkStart := BlockChunkRef(ref).Unpack() +func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { + sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() if sgmIndex >= len(s.bs) { return nil, errors.Errorf("segment index %d out of range", sgmIndex) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index edd7dd5419c..b2ad59fd6f4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -87,6 +87,18 @@ func (ref ChunkDiskMapperRef) Unpack() (seq, offset int) { return seq, offset } +func (ref ChunkDiskMapperRef) GreaterThanOrEqualTo(r ChunkDiskMapperRef) bool { + s1, o1 := ref.Unpack() + s2, o2 := r.Unpack() + return s1 > s2 || (s1 == s2 && o1 >= o2) +} + +func (ref ChunkDiskMapperRef) GreaterThan(r ChunkDiskMapperRef) bool { + s1, o1 := ref.Unpack() + s2, o2 := r.Unpack() + return s1 > s2 || (s1 == s2 && o1 > o2) +} + // CorruptionErr is an error that's returned when corruption is encountered. type CorruptionErr struct { Dir string @@ -360,11 +372,25 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro return files, nil } - info, err := os.Stat(files[lastFile]) + f, err := os.Open(files[lastFile]) if err != nil { - return files, errors.Wrap(err, "file stat during last head chunk file repair") + return files, errors.Wrap(err, "open file during last head chunk file repair") + } + + buf := make([]byte, MagicChunksSize) + size, err := f.Read(buf) + if err != nil && err != io.EOF { + return files, errors.Wrap(err, "failed to read magic number during last head chunk file repair") + } + if err := f.Close(); err != nil { + return files, errors.Wrap(err, "close file during last head chunk file repair") } - if info.Size() == 0 { + + // We either don't have enough bytes for the magic number or the magic number is 0. + // NOTE: we should not check for wrong magic number here because that error + // needs to be sent up the function called (already done elsewhere) + // for proper repair mechanism to happen in the Head. + if size < MagicChunksSize || binary.BigEndian.Uint32(buf) == 0 { // Corrupt file, hence remove it. if err := os.RemoveAll(files[lastFile]); err != nil { return files, errors.Wrap(err, "delete corrupted, empty head chunk file during last file repair") @@ -736,7 +762,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error // and runs the provided function with information about each chunk. It returns on the first error encountered. // NOTE: This method needs to be called at least once after creating ChunkDiskMapper // to set the maxt of all the file. -func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error) (err error) { +func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error) { cdm.writePathMtx.Lock() defer cdm.writePathMtx.Unlock() @@ -799,7 +825,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu break } - idx += ChunkEncodingSize // Skip encoding. 
+ chkEnc := chunkenc.Encoding(mmapFile.byteSlice.Range(idx, idx+ChunkEncodingSize)[0]) + idx += ChunkEncodingSize dataLen, n := binary.Uvarint(mmapFile.byteSlice.Range(idx, idx+MaxChunkLengthFieldSize)) idx += n @@ -834,7 +861,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu mmapFile.maxt = maxt } - if err := f(seriesRef, chunkRef, mint, maxt, numSamples); err != nil { + if err := f(seriesRef, chunkRef, mint, maxt, numSamples, chkEnc); err != nil { if cerr, ok := err.(*CorruptionErr); ok { cerr.Dir = cdm.dir.Name() cerr.FileIndex = segID @@ -857,12 +884,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return nil } -// Truncate deletes the head chunk files which are strictly below the mint. -// mint should be in milliseconds. -func (cdm *ChunkDiskMapper) Truncate(mint int64) error { - if !cdm.fileMaxtSet { - return errors.New("maxt of the files are not set") - } +// Truncate deletes the head chunk files whose file number is less than given fileNo. +func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error { cdm.readPathMtx.RLock() // Sort the file indices, else if files deletion fails in between, @@ -875,12 +898,10 @@ func (cdm *ChunkDiskMapper) Truncate(mint int64) error { var removedFiles []int for _, seq := range chkFileIndices { - if seq == cdm.curFileSequence || cdm.mmappedChunkFiles[seq].maxt >= mint { + if seq == cdm.curFileSequence || uint32(seq) >= fileNo { break } - if cdm.mmappedChunkFiles[seq].maxt < mint { - removedFiles = append(removedFiles, seq) - } + removedFiles = append(removedFiles, seq) } cdm.readPathMtx.RUnlock() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/queue.go new file mode 100644 index 00000000000..860381a5fef --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/queue.go @@ -0,0 +1,141 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package chunks + +import "sync" + +// writeJobQueue is similar to buffered channel of chunkWriteJob, but manages its own buffers +// to avoid using a lot of memory when it's empty. It does that by storing elements into segments +// of equal size (segmentSize). When segment is not used anymore, reference to it are removed, +// so it can be treated as a garbage. +type writeJobQueue struct { + maxSize int + segmentSize int + + mtx sync.Mutex // protects all following variables + pushed, popped *sync.Cond // signalled when something is pushed into the queue or popped from it + first, last *writeJobQueueSegment // pointer to first and last segment, if any + size int // total size of the queue + closed bool // after closing the queue, nothing can be pushed to it +} + +type writeJobQueueSegment struct { + segment []chunkWriteJob + nextRead, nextWrite int // index of next read and next write in this segment. 
+ nextSegment *writeJobQueueSegment // next segment, if any +} + +func newWriteJobQueue(maxSize, segmentSize int) *writeJobQueue { + if maxSize <= 0 || segmentSize <= 0 { + panic("invalid queue") + } + + q := &writeJobQueue{ + maxSize: maxSize, + segmentSize: segmentSize, + } + + q.pushed = sync.NewCond(&q.mtx) + q.popped = sync.NewCond(&q.mtx) + return q +} + +func (q *writeJobQueue) close() { + q.mtx.Lock() + defer q.mtx.Unlock() + + q.closed = true + + // Unblock all blocked goroutines. + q.pushed.Broadcast() + q.popped.Broadcast() +} + +// push blocks until there is space available in the queue, and then adds job to the queue. +// If queue is closed or gets closed while waiting for space, push returns false. +func (q *writeJobQueue) push(job chunkWriteJob) bool { + q.mtx.Lock() + defer q.mtx.Unlock() + + // Wait until queue has more space or is closed. + for !q.closed && q.size >= q.maxSize { + q.popped.Wait() + } + + if q.closed { + return false + } + + // Check if this segment has more space for writing, and create new one if not. + if q.last == nil || q.last.nextWrite >= q.segmentSize { + prevLast := q.last + q.last = &writeJobQueueSegment{ + segment: make([]chunkWriteJob, q.segmentSize), + } + + if prevLast != nil { + prevLast.nextSegment = q.last + } + if q.first == nil { + q.first = q.last + } + } + + q.last.segment[q.last.nextWrite] = job + q.last.nextWrite++ + q.size++ + q.pushed.Signal() + return true +} + +// pop returns first job from the queue, and true. +// If queue is empty, pop blocks until there is a job (returns true), or until queue is closed (returns false). +// If queue was already closed, pop first returns all remaining elements from the queue (with true value), and only then returns false. +func (q *writeJobQueue) pop() (chunkWriteJob, bool) { + q.mtx.Lock() + defer q.mtx.Unlock() + + // wait until something is pushed to the queue, or queue is closed. + for q.size == 0 { + if q.closed { + return chunkWriteJob{}, false + } + + q.pushed.Wait() + } + + res := q.first.segment[q.first.nextRead] + q.first.segment[q.first.nextRead] = chunkWriteJob{} // clear just-read element + q.first.nextRead++ + q.size-- + + // If we have read all possible elements from first segment, we can drop it. + if q.first.nextRead >= q.segmentSize { + q.first = q.first.nextSegment + if q.first == nil { + q.last = nil + } + } + + q.popped.Signal() + return res, true +} + +// length returns number of all jobs in the queue. +func (q *writeJobQueue) length() int { + q.mtx.Lock() + defer q.mtx.Unlock() + + return q.size +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index ab0822d73be..854918c369b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -33,6 +33,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" "golang.org/x/sync/errgroup" "github.com/prometheus/prometheus/config" @@ -69,18 +70,18 @@ var ErrNotReady = errors.New("TSDB not ready") // millisecond precision timestamps. 
func DefaultOptions() *Options { return &Options{ - WALSegmentSize: wal.DefaultSegmentSize, - MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, - RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), - MinBlockDuration: DefaultBlockDuration, - MaxBlockDuration: DefaultBlockDuration, - NoLockfile: false, - AllowOverlappingBlocks: false, - WALCompression: false, - StripeSize: DefaultStripeSize, - HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, - IsolationDisabled: defaultIsolationDisabled, - HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, + WALSegmentSize: wal.DefaultSegmentSize, + MaxBlockChunkSegmentSize: chunks.DefaultChunkSegmentSize, + RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), + MinBlockDuration: DefaultBlockDuration, + MaxBlockDuration: DefaultBlockDuration, + NoLockfile: false, + AllowOverlappingCompaction: true, + WALCompression: false, + StripeSize: DefaultStripeSize, + HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize, + IsolationDisabled: defaultIsolationDisabled, + OutOfOrderCapMax: DefaultOutOfOrderCapMax, } } @@ -112,9 +113,13 @@ type Options struct { // NoLockfile disables creation and consideration of a lock file. NoLockfile bool - // Overlapping blocks are allowed if AllowOverlappingBlocks is true. - // This in-turn enables vertical compaction and vertical query merge. - AllowOverlappingBlocks bool + // Compaction of overlapping blocks are allowed if AllowOverlappingCompaction is true. + // This is an optional flag for overlapping blocks. + // The reason why this flag exists is because there are various users of the TSDB + // that do not want vertical compaction happening on ingest time. Instead, + // they'd rather keep overlapping blocks and let another component do the overlapping compaction later. + // For Prometheus, this will always be true. + AllowOverlappingCompaction bool // WALCompression will turn on Snappy compression for records on the WAL. WALCompression bool @@ -160,6 +165,15 @@ type Options struct { // Disables isolation between reads and in-flight appends. IsolationDisabled bool + + // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. + // This can change during run-time, so this value from here should only be used + // while initialising. + OutOfOrderTimeWindow int64 + + // OutOfOrderCapMax is maximum capacity for OOO chunks (in samples). + // If it is <=0, the default value is assumed. + OutOfOrderCapMax int64 } type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} @@ -197,6 +211,13 @@ type DB struct { // Cancel a running compaction when a shutdown is initiated. compactCancel context.CancelFunc + + // oooWasEnabled is true if out of order support was enabled at least one time + // during the time TSDB was up. In which case we need to keep supporting + // out-of-order compaction and vertical queries. + oooWasEnabled atomic.Bool + + registerer prometheus.Registerer } type dbMetrics struct { @@ -318,7 +339,7 @@ type DBStats struct { } // NewDBStats returns a new DBStats object initialized using the -// the new function from each component. +// new function from each component. 
func NewDBStats() *DBStats { return &DBStats{ Head: NewHeadStats(), @@ -372,9 +393,17 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { if err != nil { return err } + var wbl *wal.WAL + wblDir := filepath.Join(db.dir, wal.WblDirName) + if _, err := os.Stat(wblDir); !os.IsNotExist(err) { + wbl, err = wal.Open(db.logger, wblDir) + if err != nil { + return err + } + } opts := DefaultHeadOptions() opts.ChunkDirRoot = db.dir - head, err := NewHead(nil, db.logger, w, opts, NewHeadStats()) + head, err := NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) if err != nil { return err } @@ -430,7 +459,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue opts := DefaultHeadOptions() opts.ChunkDirRoot = db.dir - head, err := NewHead(nil, db.logger, nil, opts, NewHeadStats()) + head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats()) if err != nil { return nil, err } @@ -448,9 +477,17 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue if err != nil { return nil, err } + var wbl *wal.WAL + wblDir := filepath.Join(db.dir, wal.WblDirName) + if _, err := os.Stat(wblDir); !os.IsNotExist(err) { + wbl, err = wal.Open(db.logger, wblDir) + if err != nil { + return nil, err + } + } opts := DefaultHeadOptions() opts.ChunkDirRoot = db.dir - head, err = NewHead(nil, db.logger, w, opts, NewHeadStats()) + head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) if err != nil { return nil, err } @@ -598,6 +635,12 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.MinBlockDuration > opts.MaxBlockDuration { opts.MaxBlockDuration = opts.MinBlockDuration } + if opts.OutOfOrderCapMax <= 0 { + opts.OutOfOrderCapMax = DefaultOutOfOrderCapMax + } + if opts.OutOfOrderTimeWindow < 0 { + opts.OutOfOrderTimeWindow = 0 + } if len(rngs) == 0 { // Start with smallest block duration and create exponential buckets until the exceed the @@ -634,6 +677,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } walDir := filepath.Join(dir, "wal") + wblDir := filepath.Join(dir, wal.WblDirName) // Migrate old WAL if one exists. if err := MigrateWAL(l, walDir); err != nil { @@ -656,6 +700,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs autoCompact: true, chunkPool: chunkenc.NewPool(), blocksToDelete: opts.BlocksToDelete, + registerer: r, } defer func() { // Close files if startup fails somewhere. @@ -694,7 +739,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } db.compactCancel = cancel - var wlog *wal.WAL + var wlog, wblog *wal.WAL segmentSize := wal.DefaultSegmentSize // Wal is enabled. if opts.WALSegmentSize >= 0 { @@ -706,8 +751,19 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if err != nil { return nil, err } + // Check if there is a WBL on disk, in which case we should replay that data. 
+ wblSize, err := fileutil.DirSize(wblDir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if opts.OutOfOrderTimeWindow > 0 || wblSize > 0 { + wblog, err = wal.NewSize(l, r, wblDir, segmentSize, opts.WALCompression) + if err != nil { + return nil, err + } + } } - + db.oooWasEnabled.Store(opts.OutOfOrderTimeWindow > 0) headOpts := DefaultHeadOptions() headOpts.ChunkRange = rngs[0] headOpts.ChunkDirRoot = dir @@ -719,11 +775,13 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.EnableExemplarStorage = opts.EnableExemplarStorage headOpts.MaxExemplars.Store(opts.MaxExemplars) headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown + headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow) + headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax) if opts.IsolationDisabled { // We only override this flag if isolation is disabled at DB level. We use the default otherwise. headOpts.IsolationDisabled = opts.IsolationDisabled } - db.head, err = NewHead(r, l, wlog, headOpts, stats.Head) + db.head, err = NewHead(r, l, wlog, wblog, headOpts, stats.Head) if err != nil { return nil, err } @@ -741,20 +799,36 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } // Set the min valid time for the ingested samples // to be no lower than the maxt of the last block. - blocks := db.Blocks() minValidTime := int64(math.MinInt64) - if len(blocks) > 0 { - minValidTime = blocks[len(blocks)-1].Meta().MaxTime + // We do not consider blocks created from out-of-order samples for Head's minValidTime + // since minValidTime is only for the in-order data and we do not want to discard unnecessary + // samples from the Head. + inOrderMaxTime, ok := db.inOrderBlocksMaxTime() + if ok { + minValidTime = inOrderMaxTime } if initErr := db.head.Init(minValidTime); initErr != nil { db.head.metrics.walCorruptionsTotal.Inc() - level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) - if err := wlog.Repair(initErr); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") + isOOOErr := isErrLoadOOOWal(initErr) + if isOOOErr { + level.Warn(db.logger).Log("msg", "Encountered OOO WAL read error, attempting repair", "err", initErr) + if err := wblog.Repair(initErr); err != nil { + return nil, errors.Wrap(err, "repair corrupted OOO WAL") + } + } else { + level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) + if err := wlog.Repair(initErr); err != nil { + return nil, errors.Wrap(err, "repair corrupted WAL") + } } } + if db.head.MinOOOTime() != int64(math.MaxInt64) { + // Some OOO data was replayed from the disk that needs compaction and cleanup. + db.oooWasEnabled.Store(true) + } + go db.run() return db, nil @@ -846,8 +920,58 @@ func (db *DB) Appender(ctx context.Context) storage.Appender { return dbAppender{db: db, Appender: db.head.Appender(ctx)} } +// ApplyConfig applies a new config to the DB. +// Behaviour of 'OutOfOrderTimeWindow' is as follows: +// OOO enabled = oooTimeWindow > 0. OOO disabled = oooTimeWindow is 0. +// 1) Before: OOO disabled, Now: OOO enabled => +// - A new WBL is created for the head block. +// - OOO compaction is enabled. +// - Overlapping queries are enabled. +// +// 2) Before: OOO enabled, Now: OOO enabled => +// - Only the time window is updated. +// +// 3) Before: OOO enabled, Now: OOO disabled => +// - Time Window set to 0. So no new OOO samples will be allowed. 
+// - OOO WBL will stay and will be eventually cleaned up. +// - OOO Compaction and overlapping queries will remain enabled until a restart or until all OOO samples are compacted. +// +// 4) Before: OOO disabled, Now: OOO disabled => no-op. func (db *DB) ApplyConfig(conf *config.Config) error { - return db.head.ApplyConfig(conf) + oooTimeWindow := int64(0) + if conf.StorageConfig.TSDBConfig != nil { + oooTimeWindow = conf.StorageConfig.TSDBConfig.OutOfOrderTimeWindow + } + if oooTimeWindow < 0 { + oooTimeWindow = 0 + } + + // Create WBL if it was not present and if OOO is enabled with WAL enabled. + var wblog *wal.WAL + var err error + if db.head.wbl != nil { + // The existing WBL from the disk might have been replayed while OOO was disabled. + wblog = db.head.wbl + } else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 { + segmentSize := wal.DefaultSegmentSize + // Wal is set to a custom size. + if db.opts.WALSegmentSize > 0 { + segmentSize = db.opts.WALSegmentSize + } + oooWalDir := filepath.Join(db.dir, wal.WblDirName) + wblog, err = wal.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression) + if err != nil { + return err + } + } + + db.opts.OutOfOrderTimeWindow = oooTimeWindow + db.head.ApplyConfig(conf, wblog) + + if !db.oooWasEnabled.Load() { + db.oooWasEnabled.Store(oooTimeWindow > 0) + } + return nil } // dbAppender wraps the DB's head appender and triggers compactions on commit @@ -888,7 +1012,9 @@ func (db *DB) Compact() (returnErr error) { db.cmtx.Lock() defer db.cmtx.Unlock() defer func() { - if returnErr != nil { + if returnErr != nil && !errors.Is(returnErr, context.Canceled) { + // If we got an error because context was canceled then we're most likely + // shutting down TSDB and we don't need to report this on metrics db.metrics.compactionsFailed.Inc() } }() @@ -923,7 +1049,15 @@ func (db *DB) Compact() (returnErr error) { // so in order to make sure that overlaps are evaluated // consistently, we explicitly remove the last value // from the block interval here. - if err := db.compactHead(NewRangeHead(db.head, mint, maxt-1)); err != nil { + rh := NewRangeHeadWithIsolationDisabled(db.head, mint, maxt-1) + + // Compaction runs with isolation disabled, because head.compactable() + // ensures that maxt is more than chunkRange/2 back from now, and + // head.appendableMinValidTime() ensures that no new appends can start within the compaction range. + // We do need to wait for any overlapping appenders that started previously to finish. + db.head.WaitForAppendersOverlapping(rh.MaxTime()) + + if err := db.compactHead(rh); err != nil { return errors.Wrap(err, "compact head") } // Consider only successful compactions for WAL truncation. @@ -944,6 +1078,14 @@ func (db *DB) Compact() (returnErr error) { "block_range", db.head.chunkRange.Load(), ) } + + if lastBlockMaxt != math.MinInt64 { + // The head was compacted, so we compact OOO head as well. + if err := db.compactOOOHead(); err != nil { + return errors.Wrap(err, "compact ooo head") + } + } + return db.compactBlocks() } @@ -962,6 +1104,102 @@ func (db *DB) CompactHead(head *RangeHead) error { return nil } +// CompactOOOHead compacts the OOO Head. 
+func (db *DB) CompactOOOHead() error { + db.cmtx.Lock() + defer db.cmtx.Unlock() + + return db.compactOOOHead() +} + +func (db *DB) compactOOOHead() error { + if !db.oooWasEnabled.Load() { + return nil + } + oooHead, err := NewOOOCompactionHead(db.head) + if err != nil { + return errors.Wrap(err, "get ooo compaction head") + } + + ulids, err := db.compactOOO(db.dir, oooHead) + if err != nil { + return errors.Wrap(err, "compact ooo head") + } + if err := db.reloadBlocks(); err != nil { + errs := tsdb_errors.NewMulti(err) + for _, uid := range ulids { + if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { + errs.Add(errRemoveAll) + } + } + return errors.Wrap(errs.Err(), "reloadBlocks blocks after failed compact ooo head") + } + + lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef() + if lastWBLFile != 0 || minOOOMmapRef != 0 { + if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil { + return errors.Wrap(err, "truncate ooo wbl") + } + } + + return nil +} + +// compactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given. +// Each ULID in the result corresponds to a block in a unique time range. +func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID, err error) { + start := time.Now() + + blockSize := oooHead.ChunkRange() + oooHeadMint, oooHeadMaxt := oooHead.MinTime(), oooHead.MaxTime() + ulids := make([]ulid.ULID, 0) + defer func() { + if err != nil { + // Best effort removal of created block on any error. + for _, uid := range ulids { + _ = os.RemoveAll(filepath.Join(db.dir, uid.String())) + } + } + }() + + for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize { + mint, maxt := t, t+blockSize + // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. + uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, nil) + if err != nil { + return nil, err + } + if uid.Compare(ulid.ULID{}) != 0 { + ulids = append(ulids, uid) + blockDir := filepath.Join(dest, uid.String()) + meta, _, err := readMetaFile(blockDir) + if err != nil { + return ulids, errors.Wrap(err, "read meta") + } + meta.Compaction.SetOutOfOrder() + _, err = writeMetaFile(db.logger, blockDir, meta) + if err != nil { + return ulids, errors.Wrap(err, "write meta") + } + } + } + + if len(ulids) == 0 { + level.Info(db.logger).Log( + "msg", "compact ooo head resulted in no blocks", + "duration", time.Since(start), + ) + return nil, nil + } + + level.Info(db.logger).Log( + "msg", "out-of-order compaction completed", + "duration", time.Since(start), + "ulids", fmt.Sprintf("%v", ulids), + ) + return ulids, nil +} + // compactHead compacts the given RangeHead. // The compaction mutex should be held before calling this method. 
func (db *DB) compactHead(head *RangeHead) error { @@ -1036,10 +1274,11 @@ func (db *DB) reload() error { if err := db.reloadBlocks(); err != nil { return errors.Wrap(err, "reloadBlocks") } - if len(db.blocks) == 0 { + maxt, ok := db.inOrderBlocksMaxTime() + if !ok { return nil } - if err := db.head.Truncate(db.blocks[len(db.blocks)-1].MaxTime()); err != nil { + if err := db.head.Truncate(maxt); err != nil { return errors.Wrap(err, "head truncate") } return nil @@ -1119,11 +1358,6 @@ func (db *DB) reloadBlocks() (err error) { sort.Slice(toLoad, func(i, j int) bool { return toLoad[i].Meta().MinTime < toLoad[j].Meta().MinTime }) - if !db.opts.AllowOverlappingBlocks { - if err := validateBlockSequence(toLoad); err != nil { - return errors.Wrap(err, "invalid block sequence") - } - } // Swap new blocks first for subsequently created readers to be seen. oldBlocks := db.blocks @@ -1295,25 +1529,6 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { return nil } -// validateBlockSequence returns error if given block meta files indicate that some blocks overlaps within sequence. -func validateBlockSequence(bs []*Block) error { - if len(bs) <= 1 { - return nil - } - - var metas []BlockMeta - for _, b := range bs { - metas = append(metas, b.meta) - } - - overlaps := OverlappingBlocks(metas) - if len(overlaps) > 0 { - return errors.Errorf("block time ranges overlap: %s", overlaps) - } - - return nil -} - // TimeRange specifies minTime and maxTime range. type TimeRange struct { Min, Max int64 @@ -1426,6 +1641,21 @@ func (db *DB) Blocks() []*Block { return db.blocks } +// inOrderBlocksMaxTime returns the max time among the blocks that were not totally created +// out of out-of-order data. If the returned boolean is true, it means there is at least +// one such block. +func (db *DB) inOrderBlocksMaxTime() (maxt int64, ok bool) { + maxt, ok = int64(math.MinInt64), false + // If blocks are overlapping, last block might not have the max time. So check all blocks. + for _, b := range db.Blocks() { + if !b.meta.Compaction.FromOutOfOrder() && b.meta.MaxTime > maxt { + ok = true + maxt = b.meta.MaxTime + } + } + return maxt, ok +} + // Head returns the databases's head. func (db *DB) Head() *Head { return db.head @@ -1524,13 +1754,13 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err blocks = append(blocks, b) } } - var headQuerier storage.Querier + var inOrderHeadQuerier storage.Querier if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - headQuerier, err = NewBlockQuerier(rh, mint, maxt) + inOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head %s", rh) + return nil, errors.Wrapf(err, "open block querier for head %s", rh) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -1538,20 +1768,30 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := headQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head querier %s", rh) + if err := inOrderHeadQuerier.Close(); err != nil { + return nil, errors.Wrapf(err, "closing head block querier %s", rh) } - headQuerier = nil + inOrderHeadQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - headQuerier, err = NewBlockQuerier(rh, newMint, maxt) + inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) + return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) } } } + var outOfOrderHeadQuerier storage.Querier + if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + rh := NewOOORangeHead(db.head, mint, maxt) + var err error + outOfOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) + if err != nil { + return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) + } + } + blockQueriers := make([]storage.Querier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) @@ -1566,14 +1806,18 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err } return nil, errors.Wrapf(err, "open querier for block %s", b) } - if headQuerier != nil { - blockQueriers = append(blockQueriers, headQuerier) + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } + if outOfOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } return storage.NewMergeQuerier(blockQueriers, nil, storage.ChainedSeriesMerge), nil } -// ChunkQuerier returns a new chunk querier over the data partition for the given time range. -func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { +// blockQueriersForRange returns individual block chunk queriers from the persistent blocks, in-order head block, and the +// out-of-order head block, overlapping with the given time range. +func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerier, error) { var blocks []BlockReader db.mtx.RLock() @@ -1584,11 +1828,11 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu blocks = append(blocks, b) } } - var headQuerier storage.ChunkQuerier + var inOrderHeadQuerier storage.ChunkQuerier if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - headQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open querier for head %s", rh) } @@ -1598,20 +1842,30 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := headQuerier.Close(); err != nil { + if err := inOrderHeadQuerier.Close(); err != nil { return nil, errors.Wrapf(err, "closing head querier %s", rh) } - headQuerier = nil + inOrderHeadQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - headQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) + inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) if err != nil { return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) } } } + var outOfOrderHeadQuerier storage.ChunkQuerier + if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + rh := NewOOORangeHead(db.head, mint, maxt) + var err error + outOfOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + if err != nil { + return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) + } + } + blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) @@ -1626,10 +1880,22 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu } return nil, errors.Wrapf(err, "open querier for block %s", b) } - if headQuerier != nil { - blockQueriers = append(blockQueriers, headQuerier) + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) } + if outOfOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + } + + return blockQueriers, nil +} +// ChunkQuerier returns a new chunk querier over the data partition for the given time range. +func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + blockQueriers, err := db.blockChunkQuerierForRange(mint, maxt) + if err != nil { + return nil, err + } return storage.NewMergeChunkQuerier(blockQueriers, nil, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go index c2da86afa17..1e17c2e37d1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go @@ -79,7 +79,7 @@ func (e *Encbuf) PutUvarintStr(s string) { e.PutString(s) } -// PutUvarintBytes writes a a variable length byte buffer. +// PutUvarintBytes writes a variable length byte buffer. 
func (e *Encbuf) PutUvarintBytes(b []byte) { e.PutUvarint(len(b)) e.PutBytes(b) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go index c1876d4bb73..697d2e54249 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/flock_js.go @@ -18,8 +18,7 @@ package fileutil import "errors" -type unixLock struct { -} +type unixLock struct{} func (l *unixLock) Release() error { return errors.New("unsupported") diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 5663e3cf90d..8dd15116391 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -25,12 +25,14 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -61,20 +63,25 @@ var ( type Head struct { chunkRange atomic.Int64 numSeries atomic.Uint64 - minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. + minOOOTime, maxOOOTime atomic.Int64 // TODO(jesusvazquez) These should be updated after garbage collection. + minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. TODO(jesusvazquez) Ensure these are properly tracked. minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block. lastWALTruncationTime atomic.Int64 lastMemoryTruncationTime atomic.Int64 lastSeriesID atomic.Uint64 + // All the ooo m-map chunks should be after this. This is used to truncate old ooo m-map chunks. + // This should be typecasted to chunks.ChunkDiskMapperRef after loading. + minOOOMmapRef atomic.Uint64 metrics *headMetrics opts *HeadOptions - wal *wal.WAL + wal, wbl *wal.WAL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage logger log.Logger appendPool sync.Pool exemplarsPool sync.Pool + metadataPool sync.Pool seriesPool sync.Pool bytesPool sync.Pool memChunkPool sync.Pool @@ -85,6 +92,7 @@ type Head struct { deletedMtx sync.Mutex deleted map[chunks.HeadSeriesRef]int // Deleted series, and what WAL segment they must be kept until. + // TODO(codesome): Extend MemPostings to return only OOOPostings, Set OOOStatus, ... Like an additional map of ooo postings. postings *index.MemPostings // Postings lists for terms. tombstones *tombstones.MemTombstones @@ -128,6 +136,8 @@ type HeadOptions struct { ChunkPool chunkenc.Pool ChunkWriteBufferSize int ChunkWriteQueueSize int + OutOfOrderTimeWindow atomic.Int64 + OutOfOrderCapMax atomic.Int64 // StripeSize sets the number of entries in the hash map, it must be a power of 2. // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. @@ -140,8 +150,13 @@ type HeadOptions struct { IsolationDisabled bool } +const ( + // DefaultOutOfOrderCapMax is the default maximum size of an in-memory out-of-order chunk. 
+ DefaultOutOfOrderCapMax int64 = 32 +) + func DefaultHeadOptions() *HeadOptions { - return &HeadOptions{ + ho := &HeadOptions{ ChunkRange: DefaultBlockDuration, ChunkDirRoot: "", ChunkPool: chunkenc.NewPool(), @@ -151,6 +166,8 @@ func DefaultHeadOptions() *HeadOptions { SeriesCallback: &noopSeriesLifecycleCallback{}, IsolationDisabled: defaultIsolationDisabled, } + ho.OutOfOrderCapMax.Store(DefaultOutOfOrderCapMax) + return ho } // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. @@ -169,11 +186,23 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. -func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { l = log.NewNopLogger() } + + if opts.OutOfOrderTimeWindow.Load() < 0 { + opts.OutOfOrderTimeWindow.Store(0) + } + + // Time window can be set on runtime. So the capMin and capMax should be valid + // even if ooo is not enabled yet. + capMax := opts.OutOfOrderCapMax.Load() + if capMax <= 0 || capMax > 255 { + return nil, errors.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax) + } + if opts.ChunkRange < 1 { return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange) } @@ -191,6 +220,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti h := &Head{ wal: wal, + wbl: wbl, logger: l, opts: opts, memChunkPool: sync.Pool{ @@ -252,35 +282,40 @@ func (h *Head) resetInMemoryState() error { h.chunkRange.Store(h.opts.ChunkRange) h.minTime.Store(math.MaxInt64) h.maxTime.Store(math.MinInt64) + h.minOOOTime.Store(math.MaxInt64) + h.maxOOOTime.Store(math.MinInt64) h.lastWALTruncationTime.Store(math.MinInt64) h.lastMemoryTruncationTime.Store(math.MinInt64) return nil } type headMetrics struct { - activeAppenders prometheus.Gauge - series prometheus.GaugeFunc - seriesCreated prometheus.Counter - seriesRemoved prometheus.Counter - seriesNotFound prometheus.Counter - chunks prometheus.Gauge - chunksCreated prometheus.Counter - chunksRemoved prometheus.Counter - gcDuration prometheus.Summary - samplesAppended prometheus.Counter - outOfBoundSamples prometheus.Counter - outOfOrderSamples prometheus.Counter - walTruncateDuration prometheus.Summary - walCorruptionsTotal prometheus.Counter - walTotalReplayDuration prometheus.Gauge - headTruncateFail prometheus.Counter - headTruncateTotal prometheus.Counter - checkpointDeleteFail prometheus.Counter - checkpointDeleteTotal prometheus.Counter - checkpointCreationFail prometheus.Counter - checkpointCreationTotal prometheus.Counter - mmapChunkCorruptionTotal prometheus.Counter - snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. 
+ activeAppenders prometheus.Gauge + series prometheus.GaugeFunc + seriesCreated prometheus.Counter + seriesRemoved prometheus.Counter + seriesNotFound prometheus.Counter + chunks prometheus.Gauge + chunksCreated prometheus.Counter + chunksRemoved prometheus.Counter + gcDuration prometheus.Summary + samplesAppended prometheus.Counter + outOfOrderSamplesAppended prometheus.Counter + outOfBoundSamples prometheus.Counter + outOfOrderSamples prometheus.Counter + tooOldSamples prometheus.Counter + walTruncateDuration prometheus.Summary + walCorruptionsTotal prometheus.Counter + dataTotalReplayDuration prometheus.Gauge + headTruncateFail prometheus.Counter + headTruncateTotal prometheus.Counter + checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter + mmapChunkCorruptionTotal prometheus.Counter + snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. + oooHistogram prometheus.Histogram } func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { @@ -331,7 +366,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_wal_corruptions_total", Help: "Total number of WAL corruptions.", }), - walTotalReplayDuration: prometheus.NewGauge(prometheus.GaugeOpts{ + dataTotalReplayDuration: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_tsdb_data_replay_duration_seconds", Help: "Time taken to replay the data on disk.", }), @@ -339,13 +374,21 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_head_samples_appended_total", Help: "Total number of appended samples.", }), + outOfOrderSamplesAppended: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_out_of_order_samples_appended_total", + Help: "Total number of appended out of order samples.", + }), outOfBoundSamples: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_out_of_bound_samples_total", - Help: "Total number of out of bound samples ingestion failed attempts.", + Help: "Total number of out of bound samples ingestion failed attempts with out of order support disabled.", }), outOfOrderSamples: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_out_of_order_samples_total", - Help: "Total number of out of order samples ingestion failed attempts.", + Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.", + }), + tooOldSamples: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_too_old_samples_total", + Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window.", }), headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_truncations_failed_total", @@ -379,6 +422,19 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_snapshot_replay_error_total", Help: "Total number snapshot replays that failed.", }), + oooHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "prometheus_tsdb_sample_ooo_delta", + Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO time window and whether sample is accepted or not).", + Buckets: []float64{ + 60 * 10, // 10 min + 60 * 30, // 30 min + 60 * 60, // 60 min + 60 * 60 * 2, // 2h + 60 * 60 * 3, // 3h + 60 * 60 * 6, // 6h + 60 * 60 * 12, // 12h + }, + }), } 
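A minimal usage sketch (not taken from the vendored diff above; the temp-directory setup and the nil registerer/logger/WAL arguments are assumptions) showing how the new out-of-order knobs on HeadOptions and the extra WBL argument to NewHead fit together:

package main

import (
	"fmt"
	"math"
	"os"
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	dir, err := os.MkdirTemp("", "ooo-head-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	opts := tsdb.DefaultHeadOptions()
	opts.ChunkDirRoot = dir
	// Accept samples up to 30 minutes older than the newest in-order sample.
	opts.OutOfOrderTimeWindow.Store((30 * time.Minute).Milliseconds())
	// NewHead rejects values outside (0, 255]; see the validation above.
	opts.OutOfOrderCapMax.Store(64)

	// nil registerer, logger, WAL and WBL keep the sketch self-contained;
	// a real caller would pass a WAL and, when OOO is enabled, a WBL too.
	h, err := tsdb.NewHead(nil, nil, nil, nil, opts, tsdb.NewHeadStats())
	if err != nil {
		panic(err)
	}
	defer h.Close()

	if err := h.Init(math.MinInt64); err != nil {
		panic(err)
	}
	fmt.Println("ooo window (ms):", opts.OutOfOrderTimeWindow.Load())
}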
if r != nil { @@ -394,10 +450,12 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { m.gcDuration, m.walTruncateDuration, m.walCorruptionsTotal, - m.walTotalReplayDuration, + m.dataTotalReplayDuration, m.samplesAppended, + m.outOfOrderSamplesAppended, m.outOfBoundSamples, m.outOfOrderSamples, + m.tooOldSamples, m.headTruncateFail, m.headTruncateTotal, m.checkpointDeleteFail, @@ -515,8 +573,9 @@ func (h *Head) Init(minValidTime int64) error { } mmapChunkReplayStart := time.Now() - mmappedChunks, err := h.loadMmappedChunks(refSeries) + mmappedChunks, oooMmappedChunks, lastMmapRef, err := h.loadMmappedChunks(refSeries) if err != nil { + // TODO(codesome): clear out all m-map chunks here for refSeries. level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { h.metrics.mmapChunkCorruptionTotal.Inc() @@ -527,7 +586,7 @@ func (h *Head) Init(minValidTime int64) error { // If this fails, data will be recovered from WAL. // Hence we wont lose any data (given WAL is not corrupt). - mmappedChunks, err = h.removeCorruptedMmappedChunks(err) + mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err) if err != nil { return err } @@ -570,7 +629,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. - if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks); err != nil { + if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { return errors.Wrap(err, "backfill checkpoint") } h.updateWALReplayStatusRead(startFrom) @@ -603,7 +662,7 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { return errors.Wrapf(err, "segment reader (offset=%d)", offset) } - err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks) + err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) } @@ -613,29 +672,98 @@ func (h *Head) Init(minValidTime int64) error { level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } + walReplayDuration := time.Since(walReplayStart) + + wblReplayStart := time.Now() + if h.wbl != nil { + // Replay OOO WAL. 
+ startFrom, endAt, e = wal.Segments(h.wbl.Dir()) + if e != nil { + return errors.Wrap(e, "finding OOO WAL segments") + } + h.startWALReplayStatus(startFrom, endAt) + + for i := startFrom; i <= endAt; i++ { + s, err := wal.OpenReadSegment(wal.SegmentName(h.wbl.Dir(), i)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i)) + } - walReplayDuration := time.Since(start) - h.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds()) + sr := wal.NewSegmentBufReader(s) + err = h.loadWBL(wal.NewReader(sr), multiRef, lastMmapRef) + if err := sr.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) + } + if err != nil { + return err + } + level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt) + h.updateWALReplayStatusRead(i) + } + } + + wblReplayDuration := time.Since(wblReplayStart) + + totalReplayDuration := time.Since(start) + h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds()) level.Info(h.logger).Log( "msg", "WAL replay completed", "checkpoint_replay_duration", checkpointReplayDuration.String(), - "wal_replay_duration", time.Since(walReplayStart).String(), - "total_replay_duration", walReplayDuration.String(), + "wal_replay_duration", walReplayDuration.String(), + "wbl_replay_duration", wblReplayDuration.String(), + "total_replay_duration", totalReplayDuration.String(), ) return nil } -func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) (map[chunks.HeadSeriesRef][]*mmappedChunk, error) { +func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} - if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error { - if maxt < h.minValidTime.Load() { + oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} + var lastRef, secondLastRef chunks.ChunkDiskMapperRef + if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error { + secondLastRef = lastRef + lastRef = chunkRef + isOOO := chunkenc.IsOutOfOrderChunk(encoding) + if !isOOO && maxt < h.minValidTime.Load() { return nil } + + // We ignore any chunk that doesn't have a valid encoding + if !chunkenc.IsValidEncoding(encoding) { + return nil + } + ms, ok := refSeries[seriesRef] + + if isOOO { + if !ok { + oooMmappedChunks[seriesRef] = append(oooMmappedChunks[seriesRef], &mmappedChunk{ + ref: chunkRef, + minTime: mint, + maxTime: maxt, + numSamples: numSamples, + }) + return nil + } + + h.metrics.chunks.Inc() + h.metrics.chunksCreated.Inc() + + ms.oooMmappedChunks = append(ms.oooMmappedChunks, &mmappedChunk{ + ref: chunkRef, + minTime: mint, + maxTime: maxt, + numSamples: numSamples, + }) + + return nil + } + if !ok { slice := mmappedChunks[seriesRef] if len(slice) > 0 && slice[len(slice)-1].maxTime >= mint { + h.metrics.mmapChunkCorruptionTotal.Inc() return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", seriesRef, slice[len(slice)-1].minTime, slice[len(slice)-1].maxTime, mint, maxt) } @@ -650,6 +778,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) } if 
len(ms.mmappedChunks) > 0 && ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime >= mint { + h.metrics.mmapChunkCorruptionTotal.Inc() return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", seriesRef, ms.mmappedChunks[len(ms.mmappedChunks)-1].minTime, ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime, mint, maxt) @@ -673,45 +802,57 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) } return nil }); err != nil { - return nil, errors.Wrap(err, "iterate on on-disk chunks") + // secondLastRef because the lastRef caused an error. + return nil, nil, secondLastRef, errors.Wrap(err, "iterate on on-disk chunks") } - return mmappedChunks, nil + return mmappedChunks, oooMmappedChunks, lastRef, nil } // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously // loaded mmapped chunks. -func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, error) { +func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { + level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") // We never want to preserve the in-memory series from snapshots if we are repairing m-map chunks. if err := h.resetInMemoryState(); err != nil { - return nil, err + return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, err } level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil { level.Info(h.logger).Log("msg", "Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) - if err := h.chunkDiskMapper.Truncate(math.MaxInt64); err != nil { + if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed", "err", err) } - return map[chunks.HeadSeriesRef][]*mmappedChunk{}, nil + return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, nil } level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") - mmappedChunks, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) + mmappedChunks, oooMmappedChunks, lastRef, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) if err != nil { level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) - if err := h.chunkDiskMapper.Truncate(math.MaxInt64); err != nil { + if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed after failed loading", "err", err) } mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } - return mmappedChunks, nil + return mmappedChunks, oooMmappedChunks, lastRef, nil } -func (h *Head) ApplyConfig(cfg *config.Config) error { +func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) { + oooTimeWindow := int64(0) + if cfg.StorageConfig.TSDBConfig != nil { + oooTimeWindow = cfg.StorageConfig.TSDBConfig.OutOfOrderTimeWindow + } + if oooTimeWindow < 0 { + oooTimeWindow = 0 + } + + h.SetOutOfOrderTimeWindow(oooTimeWindow, wbl) + if !h.opts.EnableExemplarStorage { - return nil + return } // Head uses opts.MaxExemplars in combination with opts.EnableExemplarStorage @@ -722,12 
+863,21 @@ func (h *Head) ApplyConfig(cfg *config.Config) error { newSize := h.opts.MaxExemplars.Load() if prevSize == newSize { - return nil + return } migrated := h.exemplars.(*CircularExemplarStorage).Resize(newSize) level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) - return nil +} + +// SetOutOfOrderTimeWindow updates the out of order related parameters. +// If the Head already has a WBL set, then the wbl will be ignored. +func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wal.WAL) { + if oooTimeWindow > 0 && h.wbl == nil { + h.wbl = wbl + } + + h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow) } // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. @@ -754,7 +904,7 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) { if mint >= lt { break } - if h.minTime.CAS(lt, mint) { + if h.minTime.CompareAndSwap(lt, mint) { break } } @@ -763,7 +913,28 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) { if maxt <= ht { break } - if h.maxTime.CAS(ht, maxt) { + if h.maxTime.CompareAndSwap(ht, maxt) { + break + } + } +} + +func (h *Head) updateMinOOOMaxOOOTime(mint, maxt int64) { + for { + lt := h.MinOOOTime() + if mint >= lt { + break + } + if h.minOOOTime.CompareAndSwap(lt, mint) { + break + } + } + for { + ht := h.MaxOOOTime() + if maxt <= ht { + break + } + if h.maxOOOTime.CompareAndSwap(ht, maxt) { break } } @@ -824,7 +995,7 @@ func (h *Head) truncateMemory(mint int64) (err error) { // Ensure that max time is at least as high as min time. for h.MaxTime() < mint { - h.maxTime.CAS(h.MaxTime(), mint) + h.maxTime.CompareAndSwap(h.MaxTime(), mint) } // This was an initial call to Truncate after loading blocks on startup. @@ -834,30 +1005,7 @@ func (h *Head) truncateMemory(mint int64) (err error) { } h.metrics.headTruncateTotal.Inc() - start := time.Now() - - actualMint := h.gc() - level.Info(h.logger).Log("msg", "Head GC completed", "duration", time.Since(start)) - h.metrics.gcDuration.Observe(time.Since(start).Seconds()) - if actualMint > h.minTime.Load() { - // The actual mint of the Head is higher than the one asked to truncate. - appendableMinValidTime := h.appendableMinValidTime() - if actualMint < appendableMinValidTime { - h.minTime.Store(actualMint) - h.minValidTime.Store(actualMint) - } else { - // The actual min time is in the appendable window. - // So we set the mint to the appendableMinValidTime. - h.minTime.Store(appendableMinValidTime) - h.minValidTime.Store(appendableMinValidTime) - } - } - - // Truncate the chunk m-mapper. - if err := h.chunkDiskMapper.Truncate(mint); err != nil { - return errors.Wrap(err, "truncate chunks.HeadReadWriter") - } - return nil + return h.truncateSeriesAndChunkDiskMapper("truncateMemory") } // WaitForPendingReadersInTimeRange waits for queries overlapping with given range to finish querying. @@ -882,6 +1030,13 @@ func (h *Head) WaitForPendingReadersInTimeRange(mint, maxt int64) { } } +// WaitForAppendersOverlapping waits for appends overlapping maxt to finish. +func (h *Head) WaitForAppendersOverlapping(maxt int64) { + for maxt >= h.iso.lowestAppendTime() { + time.Sleep(500 * time.Millisecond) + } +} + // IsQuerierCollidingWithTruncation returns if the current querier needs to be closed and if a new querier // has to be created. In the latter case, the method also returns the new mint to be used for creating the // new range head and the new querier. This methods helps preventing races with the truncation of in-memory data. 
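// The updateMinMaxTime and updateMinOOOMaxOOOTime loops above rely on the
// compare-and-swap retry pattern so concurrent appenders can maintain shared
// min/max timestamps without a mutex. A minimal standalone sketch of the same
// pattern with the standard library's atomic.Int64 from "sync/atomic"
// (go.uber.org/atomic, used by the Head, exposes an equivalent CompareAndSwap):
func updateMin(min *atomic.Int64, candidate int64) {
	for {
		cur := min.Load()
		if candidate >= cur {
			return // current minimum is already low enough
		}
		if min.CompareAndSwap(cur, candidate) {
			return // we installed the new minimum
		}
		// Lost the race to another goroutine; reload and retry.
	}
}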
@@ -946,7 +1101,7 @@ func (h *Head) truncateWAL(mint int64) error { } // Start a new segment, so low ingestion volume TSDB don't have more WAL than // needed. - if err := h.wal.NextSegment(); err != nil { + if _, err := h.wal.NextSegment(); err != nil { return errors.Wrap(err, "next segment") } last-- // Never consider last segment for checkpoint. @@ -1012,6 +1167,59 @@ func (h *Head) truncateWAL(mint int64) error { return nil } +// truncateOOO +// - truncates the OOO WBL files whose index is strictly less than lastWBLFile. +// - garbage collects all the m-map chunks from the memory that are less than or equal to minOOOMmapRef +// and then deletes the series that do not have any data anymore. +func (h *Head) truncateOOO(lastWBLFile int, minOOOMmapRef chunks.ChunkDiskMapperRef) error { + curMinOOOMmapRef := chunks.ChunkDiskMapperRef(h.minOOOMmapRef.Load()) + if minOOOMmapRef.GreaterThan(curMinOOOMmapRef) { + h.minOOOMmapRef.Store(uint64(minOOOMmapRef)) + if err := h.truncateSeriesAndChunkDiskMapper("truncateOOO"); err != nil { + return err + } + } + + return h.wbl.Truncate(lastWBLFile) +} + +// truncateSeriesAndChunkDiskMapper is a helper function for truncateMemory and truncateOOO. +// It runs GC on the Head and truncates the ChunkDiskMapper accordingly. +func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { + start := time.Now() + headMaxt := h.MaxTime() + actualMint, minOOOTime, minMmapFile := h.gc() + level.Info(h.logger).Log("msg", "Head GC completed", "caller", caller, "duration", time.Since(start)) + h.metrics.gcDuration.Observe(time.Since(start).Seconds()) + + if actualMint > h.minTime.Load() { + // The actual mint of the head is higher than the one asked to truncate. + appendableMinValidTime := h.appendableMinValidTime() + if actualMint < appendableMinValidTime { + h.minTime.Store(actualMint) + h.minValidTime.Store(actualMint) + } else { + // The actual min time is in the appendable window. + // So we set the mint to the appendableMinValidTime. + h.minTime.Store(appendableMinValidTime) + h.minValidTime.Store(appendableMinValidTime) + } + } + if headMaxt-h.opts.OutOfOrderTimeWindow.Load() < minOOOTime { + // The allowed OOO window is lower than the min OOO time seen during GC. + // So it is possible that some OOO sample was inserted that was less that minOOOTime. + // So we play safe and set it to the min that was possible. + minOOOTime = headMaxt - h.opts.OutOfOrderTimeWindow.Load() + } + h.minOOOTime.Store(minOOOTime) + + // Truncate the chunk m-mapper. + if err := h.chunkDiskMapper.Truncate(uint32(minMmapFile)); err != nil { + return errors.Wrap(err, "truncate chunks.HeadReadWriter by file number") + } + return nil +} + type Stats struct { NumSeries uint64 MinTime, MaxTime int64 @@ -1034,6 +1242,8 @@ func (h *Head) Stats(statsByLabelName string) *Stats { type RangeHead struct { head *Head mint, maxt int64 + + isolationOff bool } // NewRangeHead returns a *RangeHead. @@ -1046,12 +1256,23 @@ func NewRangeHead(head *Head, mint, maxt int64) *RangeHead { } } +// NewRangeHeadWithIsolationDisabled returns a *RangeHead that does not create an isolationState. 
+func NewRangeHeadWithIsolationDisabled(head *Head, mint, maxt int64) *RangeHead { + rh := NewRangeHead(head, mint, maxt) + rh.isolationOff = true + return rh +} + func (h *RangeHead) Index() (IndexReader, error) { return h.head.indexRange(h.mint, h.maxt), nil } func (h *RangeHead) Chunks() (ChunkReader, error) { - return h.head.chunksRange(h.mint, h.maxt, h.head.iso.State(h.mint, h.maxt)) + var isoState *isolationState + if !h.isolationOff { + isoState = h.head.iso.State(h.mint, h.maxt) + } + return h.head.chunksRange(h.mint, h.maxt, isoState) } func (h *RangeHead) Tombstones() (tombstones.Reader, error) { @@ -1113,6 +1334,10 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { var stones []tombstones.Stone for p.Next() { series := h.series.getByID(chunks.HeadSeriesRef(p.At())) + if series == nil { + level.Debug(h.logger).Log("msg", "Series not found in Head.Delete") + continue + } series.RLock() t0, t1 := series.minTime(), series.maxTime() @@ -1141,14 +1366,20 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { } // gc removes data before the minimum timestamp from the head. -// It returns the actual min times of the chunks present in the Head. -func (h *Head) gc() int64 { +// It returns +// * The actual min times of the chunks present in the Head. +// * The min OOO time seen during the GC. +// * Min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. +func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { // Only data strictly lower than this timestamp must be deleted. mint := h.MinTime() + // Only ooo m-map chunks strictly lower than or equal to this ref + // must be deleted. + minOOOMmapRef := chunks.ChunkDiskMapperRef(h.minOOOMmapRef.Load()) // Drop old chunks and remember series IDs and hashes if they can be // deleted entirely. - deleted, chunksRemoved, actualMint := h.series.gc(mint) + deleted, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) seriesRemoved := len(deleted) h.metrics.seriesRemoved.Add(float64(seriesRemoved)) @@ -1178,7 +1409,7 @@ func (h *Head) gc() int64 { h.deletedMtx.Unlock() } - return actualMint + return actualInOrderMint, minOOOTime, minMmapFile } // Tombstones returns a new reader over the head's tombstones @@ -1216,6 +1447,18 @@ func (h *Head) MaxTime() int64 { return h.maxTime.Load() } +// MinOOOTime returns the lowest time bound on visible data in the out of order +// head. +func (h *Head) MinOOOTime() int64 { + return h.minOOOTime.Load() +} + +// MaxOOOTime returns the highest timestamp on visible data in the out of order +// head. +func (h *Head) MaxOOOTime() int64 { + return h.maxOOOTime.Load() +} + // compactable returns whether the head has a compactable range. // The head has a compactable range when the head time range is 1.5 times the chunk range. // The 0.5 acts as a buffer of the appendable window. 
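// A worked reading of the minOOOTime clamp in truncateSeriesAndChunkDiskMapper
// above (numbers are illustrative, in milliseconds): with headMaxt = 100_000
// and OutOfOrderTimeWindow = 30_000, any sample with t >= 70_000 could have
// been accepted out of order. Even if the GC pass only observed
// minOOOTime = 85_000, a concurrent append may have slipped in an older sample,
// so the stored minOOOTime is conservatively lowered to
// headMaxt - window = 70_000 rather than trusting the GC-observed value.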
@@ -1233,6 +1476,9 @@ func (h *Head) Close() error { if h.wal != nil { errs.Add(h.wal.Close()) } + if h.wbl != nil { + errs.Add(h.wbl.Close()) + } if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown { errs.Add(h.performChunkSnapshot()) } @@ -1263,7 +1509,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool, h.opts.IsolationDisabled) + return newMemSeries(lset, id, h.opts.IsolationDisabled) }) if err != nil { return nil, false, err @@ -1325,7 +1571,7 @@ const ( ) // stripeSeries holds series by HeadSeriesRef ("ID") and also by hash of their labels. -// ID-based lookups via (getByID()) are preferred over getByHash() for performance reasons. +// ID-based lookups via getByID() are preferred over getByHash() for performance reasons. // It locks modulo ranges of IDs and hashes to reduce lock contention. // The locks are padded to not be on the same cache line. Filling the padded space // with the maps was profiled to be slower – likely due to the additional pointer @@ -1367,13 +1613,16 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // note: returning map[chunks.HeadSeriesRef]struct{} would be more accurate, // but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct // and there's no easy way to cast maps. -func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int64) { +// minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. +func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) { var ( deleted = map[storage.SeriesRef]struct{}{} deletedForCallback = []labels.Labels{} rmChunks = 0 actualMint int64 = math.MaxInt64 + minOOOTime int64 = math.MaxInt64 ) + minMmapFile = math.MaxInt32 // Run through all series and truncate old chunks. Mark those with no // chunks left as deleted and store their ID. 
for i := 0; i < s.size; i++ { @@ -1382,9 +1631,32 @@ func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int6 for hash, all := range s.hashes[i] { for _, series := range all { series.Lock() - rmChunks += series.truncateChunksBefore(mint) + rmChunks += series.truncateChunksBefore(mint, minOOOMmapRef) - if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit { + if len(series.mmappedChunks) > 0 { + seq, _ := series.mmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + } + if len(series.oooMmappedChunks) > 0 { + seq, _ := series.oooMmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + for _, ch := range series.oooMmappedChunks { + if ch.minTime < minOOOTime { + minOOOTime = ch.minTime + } + } + } + if series.oooHeadChunk != nil { + if series.oooHeadChunk.minTime < minOOOTime { + minOOOTime = series.oooHeadChunk.minTime + } + } + if len(series.mmappedChunks) > 0 || len(series.oooMmappedChunks) > 0 || + series.headChunk != nil || series.oooHeadChunk != nil || series.pendingCommit { seriesMint := series.minTime() if seriesMint < actualMint { actualMint = seriesMint @@ -1427,7 +1699,7 @@ func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int6 actualMint = mint } - return deleted, rmChunks, actualMint + return deleted, rmChunks, actualMint, minOOOTime, minMmapFile } func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries { @@ -1508,6 +1780,7 @@ type memSeries struct { ref chunks.HeadSeriesRef lset labels.Labels + meta *metadata.Metadata // Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps. // When compaction runs, chunks get moved into a block and all pointers are shifted like so: @@ -1519,38 +1792,36 @@ type memSeries struct { // // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N mmappedChunks []*mmappedChunk + headChunk *memChunk // Most recent chunk in memory that's still being built. + firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0] + + oooMmappedChunks []*mmappedChunk // Immutable chunks on disk containing OOO samples. + oooHeadChunk *oooHeadChunk // Most recent chunk for ooo samples in memory that's still being built. + firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0] - mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - headChunk *memChunk // Most recent chunk in memory that's still being built. - chunkRange int64 - firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0] + mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. nextAt int64 // Timestamp at which to cut the next chunk. - // We keep the last 4 samples here (in addition to appending them to the chunk) so we don't need coordination between appender and querier. - // Even the most compact encoding of a sample takes 2 bits, so the last byte is not contended. - sampleBuf [4]sample - - pendingCommit bool // Whether there are samples waiting to be committed to this series. + // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. + lastValue float64 // Current appender for the head chunk. Set when a new head chunk is cut. // It is nil only if headChunk is nil. E.g. 
if there was an appender that created a new series, but rolled back the commit // (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series). app chunkenc.Appender - memChunkPool *sync.Pool - // txs is nil if isolation is disabled. txs *txRing + + pendingCommit bool // Whether there are samples waiting to be committed to this series. } -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, chunkRange int64, memChunkPool *sync.Pool, isolationDisabled bool) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, isolationDisabled bool) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - chunkRange: chunkRange, - nextAt: math.MinInt64, - memChunkPool: memChunkPool, + lset: lset, + ref: id, + nextAt: math.MinInt64, } if !isolationDisabled { s.txs = newTxRing(4) @@ -1569,6 +1840,7 @@ func (s *memSeries) minTime() int64 { } func (s *memSeries) maxTime() int64 { + // The highest timestamps will always be in the regular (non-OOO) chunks, even if OOO is enabled. c := s.head() if c != nil { return c.maxTime @@ -1582,26 +1854,39 @@ func (s *memSeries) maxTime() int64 { // truncateChunksBefore removes all chunks from the series that // have no timestamp at or after mint. // Chunk IDs remain unchanged. -func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { +func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) int { + var removedInOrder int if s.headChunk != nil && s.headChunk.maxTime < mint { // If head chunk is truncated, we can truncate all mmapped chunks. - removed = 1 + len(s.mmappedChunks) - s.firstChunkID += chunks.HeadChunkID(removed) + removedInOrder = 1 + len(s.mmappedChunks) + s.firstChunkID += chunks.HeadChunkID(removedInOrder) s.headChunk = nil s.mmappedChunks = nil - return removed } if len(s.mmappedChunks) > 0 { for i, c := range s.mmappedChunks { if c.maxTime >= mint { break } - removed = i + 1 + removedInOrder = i + 1 + } + s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removedInOrder:]...) + s.firstChunkID += chunks.HeadChunkID(removedInOrder) + } + + var removedOOO int + if len(s.oooMmappedChunks) > 0 { + for i, c := range s.oooMmappedChunks { + if c.ref.GreaterThan(minOOOMmapRef) { + break + } + removedOOO = i + 1 } - s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removed:]...) - s.firstChunkID += chunks.HeadChunkID(removed) + s.oooMmappedChunks = append(s.oooMmappedChunks[:0], s.oooMmappedChunks[removedOOO:]...) + s.firstOOOChunkID += chunks.HeadChunkID(removedOOO) } - return removed + + return removedInOrder + removedOOO } // cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after @@ -1621,6 +1906,16 @@ type memChunk struct { minTime, maxTime int64 } +type oooHeadChunk struct { + chunk *OOOChunk + minTime, maxTime int64 // can probably be removed and pulled out of the chunk instead +} + +// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt]. +func (mc *oooHeadChunk) OverlapsClosedInterval(mint, maxt int64) bool { + return overlapsClosedInterval(mc.minTime, mc.maxTime, mint, maxt) +} + // OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt]. 
func (mc *memChunk) OverlapsClosedInterval(mint, maxt int64) bool { return overlapsClosedInterval(mc.minTime, mc.maxTime, mint, maxt) @@ -1649,12 +1944,15 @@ func (noopSeriesLifecycleCallback) PostCreation(labels.Labels) {} func (noopSeriesLifecycleCallback) PostDeletion(...labels.Labels) {} func (h *Head) Size() int64 { - var walSize int64 + var walSize, wblSize int64 if h.wal != nil { walSize, _ = h.wal.Size() } + if h.wbl != nil { + wblSize, _ = h.wbl.Size() + } cdmSize, _ := h.chunkDiskMapper.Size() - return walSize + cdmSize + return walSize + wblSize + cdmSize } func (h *RangeHead) Size() int64 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index 13d56de7bf2..f843aa1ec69 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -23,6 +23,7 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -65,15 +66,24 @@ func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e return a.app.AppendExemplar(ref, l, e) } +func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.UpdateMetadata(ref, l, m) + } + + a.app = a.head.appender() + return a.app.UpdateMetadata(ref, l, m) +} + // initTime initializes a head with the first timestamp. This only needs to be called // for a completely fresh head with an empty WAL. func (h *Head) initTime(t int64) { - if !h.minTime.CAS(math.MaxInt64, t) { + if !h.minTime.CompareAndSwap(math.MaxInt64, t) { return } // Ensure that max time is initialized to at least the min time we just set. // Concurrent appenders may already have set it to a higher value. - h.maxTime.CAS(math.MinInt64, t) + h.maxTime.CompareAndSwap(math.MinInt64, t) } func (a *initAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { @@ -114,7 +124,8 @@ func (h *Head) Appender(_ context.Context) storage.Appender { } func (h *Head) appender() *headAppender { - appendID, cleanupAppendIDsBelow := h.iso.newAppendID() // Every appender gets an ID that is cleared upon commit/rollback. + minValidTime := h.appendableMinValidTime() + appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback. // Allocate the exemplars buffer only if exemplars are enabled. 
var exemplarsBuf []exemplarWithSeriesRef @@ -124,12 +135,15 @@ func (h *Head) appender() *headAppender { return &headAppender{ head: h, - minValidTime: h.appendableMinValidTime(), + minValidTime: minValidTime, mint: math.MaxInt64, maxt: math.MinInt64, + headMaxt: h.MaxTime(), + oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(), samples: h.getAppendBuffer(), sampleSeries: h.getSeriesBuffer(), exemplars: exemplarsBuf, + metadata: h.getMetadataBuffer(), appendID: appendID, cleanupAppendIDsBelow: cleanupAppendIDsBelow, } @@ -196,6 +210,19 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { h.exemplarsPool.Put(b[:0]) } +func (h *Head) getMetadataBuffer() []record.RefMetadata { + b := h.metadataPool.Get() + if b == nil { + return make([]record.RefMetadata, 0, 512) + } + return b.([]record.RefMetadata) +} + +func (h *Head) putMetadataBuffer(b []record.RefMetadata) { + //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.metadataPool.Put(b[:0]) +} + func (h *Head) getSeriesBuffer() []*memSeries { b := h.seriesPool.Get() if b == nil { @@ -228,21 +255,27 @@ type exemplarWithSeriesRef struct { } type headAppender struct { - head *Head - minValidTime int64 // No samples below this timestamp are allowed. - mint, maxt int64 - - series []record.RefSeries // New series held by this appender. - samples []record.RefSample // New samples held by this appender. - exemplars []exemplarWithSeriesRef // New exemplars held by this appender. - sampleSeries []*memSeries // Series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). + head *Head + minValidTime int64 // No samples below this timestamp are allowed. + mint, maxt int64 + headMaxt int64 // We track it here to not take the lock for every sample appended. + oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample. + + series []record.RefSeries // New series held by this appender. + metadata []record.RefMetadata // New metadata held by this appender. + samples []record.RefSample // New samples held by this appender. + exemplars []exemplarWithSeriesRef // New exemplars held by this appender. + sampleSeries []*memSeries // Series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once). + metadataSeries []*memSeries // Series corresponding to the metadata held by this appender. appendID, cleanupAppendIDsBelow uint64 closed bool } func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - if t < a.minValidTime { + // For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append. + // If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work. + if a.oooTimeWindow == 0 && t < a.minValidTime { a.head.metrics.outOfBoundSamples.Inc() return 0, storage.ErrOutOfBounds } @@ -274,15 +307,25 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } s.Lock() - if err := s.appendable(t, v); err != nil { - s.Unlock() - if err == storage.ErrOutOfOrderSample { + // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise + // to skip that sample from the WAL and write only in the WBL. 
+ _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) + if err == nil { + s.pendingCommit = true + } + s.Unlock() + if delta > 0 { + a.head.metrics.oooHistogram.Observe(float64(delta)) + } + if err != nil { + switch err { + case storage.ErrOutOfOrderSample: a.head.metrics.outOfOrderSamples.Inc() + case storage.ErrTooOldSample: + a.head.metrics.tooOldSamples.Inc() } return 0, err } - s.pendingCommit = true - s.Unlock() if t < a.mint { a.mint = t @@ -300,25 +343,46 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 return storage.SeriesRef(s.ref), nil } -// appendable checks whether the given sample is valid for appending to the series. -func (s *memSeries) appendable(t int64, v float64) error { - c := s.head() - if c == nil { - return nil +// appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) +// The sample belongs to the out of order chunk if we return true and no error. +// An error signifies the sample cannot be handled. +func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { + // Check if we can append in the in-order chunk. + if t >= minValidTime { + if s.head() == nil { + // The series has no sample and was freshly created. + return false, 0, nil + } + msMaxt := s.maxTime() + if t > msMaxt { + return false, 0, nil + } + if t == msMaxt { + // We are allowing exact duplicates as we can encounter them in valid cases + // like federation and erroring out at that time would be extremely noisy. + // This only checks against the latest in-order sample. + // The OOO headchunk has its own method to detect these duplicates. + if math.Float64bits(s.lastValue) != math.Float64bits(v) { + return false, 0, storage.ErrDuplicateSampleForTimestamp + } + // Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf. + return false, 0, nil + } } - if t > c.maxTime { - return nil + // The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk. + if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow { + return true, headMaxt - t, nil } - if t < c.maxTime { - return storage.ErrOutOfOrderSample + + // The sample cannot go in both in-order and out-of-order chunk. + if oooTimeWindow > 0 { + return true, headMaxt - t, storage.ErrTooOldSample } - // We are allowing exact duplicates as we can encounter them in valid cases - // like federation and erroring out at that time would be extremely noisy. - if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) { - return storage.ErrDuplicateSampleForTimestamp + if t < minValidTime { + return false, headMaxt - t, storage.ErrOutOfBounds } - return nil + return false, headMaxt - t, storage.ErrOutOfOrderSample } // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't @@ -358,6 +422,37 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, return storage.SeriesRef(s.ref), nil } +// UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't +// use getOrCreate or make any of the lset sanity checks that Append does. 
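// A worked reading of the appendable() rules above, assuming the series' own
// maxTime equals headMaxt = 10_000, oooTimeWindow = 5_000 and
// minValidTime = 2_000 (all illustrative, in milliseconds):
//
//	t = 10_500                  -> in-order append (t is beyond the series' max)
//	t = 10_000, identical value -> accepted as a harmless duplicate
//	t =  7_000                  -> out-of-order insert (within headMaxt - oooTimeWindow)
//	t =  4_000                  -> storage.ErrTooOldSample (OOO enabled, but older than the window)
//
// With oooTimeWindow = 0 the same timestamps fall back to the old behaviour:
//
//	t =  7_000                  -> storage.ErrOutOfOrderSample
//	t =  1_000                  -> storage.ErrOutOfBounds (below minValidTime)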
+func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + s = a.head.series.getByHash(lset.Hash(), lset) + if s != nil { + ref = storage.SeriesRef(s.ref) + } + } + if s == nil { + return 0, fmt.Errorf("unknown series when trying to add metadata with HeadSeriesRef: %d and labels: %s", ref, lset) + } + + s.RLock() + hasNewMetadata := s.meta == nil || *s.meta != meta + s.RUnlock() + + if hasNewMetadata { + a.metadata = append(a.metadata, record.RefMetadata{ + Ref: s.ref, + Type: record.GetMetricType(meta.Type), + Unit: meta.Unit, + Help: meta.Help, + }) + a.metadataSeries = append(a.metadataSeries, s) + } + + return ref, nil +} + var _ storage.GetRef = &headAppender{} func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) { @@ -389,6 +484,14 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log series") } } + if len(a.metadata) > 0 { + rec = enc.Metadata(a.metadata, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log metadata") + } + } if len(a.samples) > 0 { rec = enc.Samples(a.samples, buf) buf = rec[:0] @@ -422,6 +525,7 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { } // Commit writes to the WAL and adds the data to the Head. +// TODO(codesome): Refactor this method to reduce indentation and make it more readable. func (a *headAppender) Commit() (err error) { if a.closed { return ErrAppenderClosed @@ -449,39 +553,204 @@ func (a *headAppender) Commit() (err error) { defer a.head.putAppendBuffer(a.samples) defer a.head.putSeriesBuffer(a.sampleSeries) defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.putMetadataBuffer(a.metadata) defer a.head.iso.closeAppend(a.appendID) - total := len(a.samples) - var series *memSeries + var ( + samplesAppended = len(a.samples) + oooAccepted int // number of samples out of order but accepted: with ooo enabled and within time window + oooRejected int // number of samples rejected due to: out of order but OOO support disabled. + tooOldRejected int // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside time window) + oobRejected int // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) + inOrderMint int64 = math.MaxInt64 + inOrderMaxt int64 = math.MinInt64 + ooomint int64 = math.MaxInt64 + ooomaxt int64 = math.MinInt64 + wblSamples []record.RefSample + oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef + oooRecords [][]byte + oooCapMax = a.head.opts.OutOfOrderCapMax.Load() + chunkRange = a.head.chunkRange.Load() + series *memSeries + enc record.Encoder + ) + defer func() { + for i := range oooRecords { + a.head.putBytesBuffer(oooRecords[i][:0]) + } + }() + collectOOORecords := func() { + if a.head.wbl == nil { + // WBL is not enabled. So no need to collect. + wblSamples = nil + oooMmapMarkers = nil + return + } + // The m-map happens before adding a new sample. So we collect + // the m-map markers first, and then samples. 
+ // WBL Graphically: + // WBL Before this Commit(): [old samples before this commit for chunk 1] + // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] + if oooMmapMarkers != nil { + markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers)) + for ref, mmapRef := range oooMmapMarkers { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) + } + r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) + oooRecords = append(oooRecords, r) + } + + if len(wblSamples) > 0 { + r := enc.Samples(wblSamples, a.head.getBytesBuffer()) + oooRecords = append(oooRecords, r) + } + + wblSamples = nil + oooMmapMarkers = nil + } for i, s := range a.samples { series = a.sampleSeries[i] series.Lock() - ok, chunkCreated := series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper) - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - if !ok { - total-- - a.head.metrics.outOfOrderSamples.Inc() + oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) + switch err { + case storage.ErrOutOfOrderSample: + samplesAppended-- + oooRejected++ + case storage.ErrOutOfBounds: + samplesAppended-- + oobRejected++ + case storage.ErrTooOldSample: + samplesAppended-- + tooOldRejected++ + case nil: + // Do nothing. + default: + samplesAppended-- } + + var ok, chunkCreated bool + + if err == nil && oooSample { + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRef chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax) + if chunkCreated { + r, ok := oooMmapMarkers[series.ref] + if !ok || r != 0 { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. + + // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + collectOOORecords() + } + + if oooMmapMarkers == nil { + oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef) + } + oooMmapMarkers[series.ref] = mmapRef + } + if ok { + wblSamples = append(wblSamples, s) + if s.T < ooomint { + ooomint = s.T + } + if s.T > ooomaxt { + ooomaxt = s.T + } + oooAccepted++ + } else { + // Sample is an exact duplicate of the last sample. + // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. + samplesAppended-- + } + } else if err == nil { + ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange) + if ok { + if s.T < inOrderMint { + inOrderMint = s.T + } + if s.T > inOrderMaxt { + inOrderMaxt = s.T + } + } else { + // The sample is an exact duplicate, and should be silently dropped. 
+ samplesAppended-- + } + } + if chunkCreated { a.head.metrics.chunks.Inc() a.head.metrics.chunksCreated.Inc() } + + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() } - a.head.metrics.samplesAppended.Add(float64(total)) - a.head.updateMinMaxTime(a.mint, a.maxt) + for i, m := range a.metadata { + series = a.metadataSeries[i] + series.Lock() + series.meta = &metadata.Metadata{Type: record.ToTextparseMetricType(m.Type), Unit: m.Unit, Help: m.Help} + series.Unlock() + } + a.head.metrics.outOfOrderSamples.Add(float64(oooRejected)) + a.head.metrics.outOfBoundSamples.Add(float64(oobRejected)) + a.head.metrics.tooOldSamples.Add(float64(tooOldRejected)) + a.head.metrics.samplesAppended.Add(float64(samplesAppended)) + a.head.metrics.outOfOrderSamplesAppended.Add(float64(oooAccepted)) + a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt) + + collectOOORecords() + if a.head.wbl != nil { + if err := a.head.wbl.Log(oooRecords...); err != nil { + // TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging + // until we have found what samples become OOO. We can try having a metric for this failure. + // Returning the error here is not correct because we have already put the samples into the memory, + // hence the append/insert was a success. + level.Error(a.head.logger).Log("msg", "Failed to log out of order samples into the WAL", "err", err) + } + } return nil } +// insert is like append, except it inserts. Used for OOO samples. +func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) { + c := s.oooHeadChunk + if c == nil || c.chunk.NumSamples() == int(oooCapMax) { + // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks. + c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper) + chunkCreated = true + } + + ok := c.chunk.Insert(t, v) + if ok { + if chunkCreated || t < c.minTime { + c.minTime = t + } + if chunkCreated || t > c.maxTime { + c.maxTime = t + } + } + return ok, chunkCreated, mmapRef +} + // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper) (sampleInOrder, chunkCreated bool) { +func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) { // Based on Gorilla white papers this offers near-optimal compression ratio // so anything bigger that this has diminishing returns and increases // the time range within which we have to decompress all samples. @@ -494,8 +763,8 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it. return false, false } - // There is no chunk in this series yet, create the first chunk for the sample. - c = s.cutNewHeadChunk(t, chunkDiskMapper) + // There is no head chunk in this series yet, create the first chunk for the sample. 
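// The insert() path above hands the sample to the in-memory OOO head chunk,
// which keeps samples ordered by timestamp and is cut (and m-mapped) once it
// holds OutOfOrderCapMax samples. A minimal standalone sketch of such a
// capped, sorted buffer (illustrative only, not the vendored OOOChunk; needs
// the "sort" import):
type tsSample struct {
	t int64
	v float64
}

type oooBuffer struct {
	samples []tsSample
	capMax  int
}

// insert places (t, v) in timestamp order. It reports whether the sample was
// accepted (duplicates of an existing timestamp are dropped) and whether the
// buffer is now full and should be flushed to disk.
func (b *oooBuffer) insert(t int64, v float64) (inserted, full bool) {
	i := sort.Search(len(b.samples), func(i int) bool { return b.samples[i].t >= t })
	if i < len(b.samples) && b.samples[i].t == t {
		return false, len(b.samples) >= b.capMax
	}
	b.samples = append(b.samples, tsSample{})
	copy(b.samples[i+1:], b.samples[i:])
	b.samples[i] = tsSample{t: t, v: v}
	return true, len(b.samples) >= b.capMax
}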
+ c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange) chunkCreated = true } @@ -509,7 +778,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // It could be the new chunk created after reading the chunk snapshot, // hence we fix the minTime of the chunk here. c.minTime = t - s.nextAt = rangeForTimestamp(c.minTime, s.chunkRange) + s.nextAt = rangeForTimestamp(c.minTime, chunkRange) } // If we reach 25% of a chunk's desired sample count, predict an end time @@ -525,17 +794,13 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // as we expect more chunks to come. // Note that next chunk will have its nextAt recalculated for the new rate. if t >= s.nextAt || numSamples >= samplesPerChunk*2 { - c = s.cutNewHeadChunk(t, chunkDiskMapper) + c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange) chunkCreated = true } s.app.Append(t, v) c.maxTime = t - - s.sampleBuf[0] = s.sampleBuf[1] - s.sampleBuf[1] = s.sampleBuf[2] - s.sampleBuf[2] = s.sampleBuf[3] - s.sampleBuf[3] = sample{t: t, v: v} + s.lastValue = v if appendID > 0 && s.txs != nil { s.txs.add(appendID) @@ -557,7 +822,7 @@ func computeChunkEndTime(start, cur, max int64) int64 { return start + (max-start)/n } -func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) *memChunk { +func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) *memChunk { s.mmapCurrentHeadChunk(chunkDiskMapper) s.headChunk = &memChunk{ @@ -568,7 +833,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDis // Set upper bound on when the next chunk must be started. An earlier timestamp // may be chosen dynamically at a later point. - s.nextAt = rangeForTimestamp(mint, s.chunkRange) + s.nextAt = rangeForTimestamp(mint, chunkRange) app, err := s.headChunk.chunk.Appender() if err != nil { @@ -578,6 +843,36 @@ func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDis return s.headChunk } +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) { + ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper) + + s.oooHeadChunk = &oooHeadChunk{ + chunk: NewOOOChunk(), + minTime: mint, + maxTime: math.MinInt64, + } + + return s.oooHeadChunk, ref +} + +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef { + if s.oooHeadChunk == nil { + // There is no head chunk, so nothing to m-map here. + return 0 + } + xor, _ := s.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality. + oooXor := &chunkenc.OOOXORChunk{XORChunk: xor} + chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.oooHeadChunk.minTime, s.oooHeadChunk.maxTime, oooXor, handleChunkWriteError) + s.oooMmappedChunks = append(s.oooMmappedChunks, &mmappedChunk{ + ref: chunkRef, + numSamples: uint16(xor.NumSamples()), + minTime: s.oooHeadChunk.minTime, + maxTime: s.oooHeadChunk.maxTime, + }) + s.oooHeadChunk = nil + return chunkRef +} + func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) { if s.headChunk == nil { // There is no head chunk, so nothing to m-map here. 
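// The nextAt logic above cuts chunks early for fast-moving series: once a
// chunk is roughly 25% full, its end time is re-predicted from the observed
// sample rate instead of always waiting for the full chunk range. A rough
// standalone sketch of that extrapolation (the vendored computeChunkEndTime
// differs in its exact formula; the numbers below are illustrative):
func predictChunkEnd(start, cur, max int64, curSamples, targetSamples int) int64 {
	if curSamples <= 0 {
		return max
	}
	// If curSamples took (cur - start) ms, targetSamples should take
	// proportionally longer, capped at the regular chunk range end.
	predicted := start + (cur-start)*int64(targetSamples)/int64(curSamples)
	if predicted <= start || predicted > max {
		return max
	}
	return predicted
}

// For example, a series scraped twice as fast as expected:
//	predictChunkEnd(0, 15*60*1000, 2*3600*1000, 30, 120) == 3_600_000 (1h)
// so its chunk is cut after 1h instead of spanning the whole 2h range.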
@@ -619,8 +914,10 @@ func (a *headAppender) Rollback() (err error) { } a.head.putAppendBuffer(a.samples) a.head.putExemplarBuffer(a.exemplars) + a.head.putMetadataBuffer(a.metadata) a.samples = nil a.exemplars = nil + a.metadata = nil // Series are created in the head memory regardless of rollback. Thus we have // to log them to the WAL in any case. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index cd19c0ef58e..a97537a1fe2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -17,6 +17,7 @@ import ( "context" "math" "sort" + "sync" "github.com/go-kit/log/level" "github.com/pkg/errors" @@ -182,11 +183,20 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chk return nil } -// headChunkID returns the HeadChunkID corresponding to .mmappedChunks[pos] +// headChunkID returns the HeadChunkID referred to by the given position. +// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos] +// * pos == len(s.mmappedChunks) refers to s.headChunk func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { return chunks.HeadChunkID(pos) + s.firstChunkID } +// oooHeadChunkID returns the HeadChunkID referred to by the given position. +// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos] +// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk +func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID { + return chunks.HeadChunkID(pos) + s.firstOOOChunkID +} + // LabelValueFor returns label value for the given label name in the series referred to by ID. func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) @@ -252,13 +262,15 @@ type headChunkReader struct { } func (h *headChunkReader) Close() error { - h.isoState.Close() + if h.isoState != nil { + h.isoState.Close() + } return nil } // Chunk returns the chunk for the reference number. -func (h *headChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { - sid, cid := chunks.HeadChunkRef(ref).Unpack() +func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { + sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack() s := h.head.series.getByID(sid) // This means that the series has been garbage collected. @@ -267,7 +279,7 @@ func (h *headChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { } s.Lock() - c, garbageCollect, err := s.chunk(cid, h.head.chunkDiskMapper) + c, garbageCollect, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool) if err != nil { s.Unlock() return nil, err @@ -276,7 +288,7 @@ func (h *headChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { if garbageCollect { // Set this to nil so that Go GC can collect it after it has been used. c.chunk = nil - s.memChunkPool.Put(c) + h.head.memChunkPool.Put(c) } }() @@ -293,13 +305,14 @@ func (h *headChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { cid: cid, isoState: h.isoState, chunkDiskMapper: h.head.chunkDiskMapper, + memChunkPool: &h.head.memChunkPool, }, nil } // chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk. // If garbageCollect is true, it means that the returned *memChunk // (and not the chunkenc.Chunk inside it) can be garbage collected after its usage. 
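// A worked example of the position <-> HeadChunkID mapping documented above:
// if firstChunkID is 7 and the series currently holds three m-mapped chunks
// plus an open head chunk, positions 0..2 map to HeadChunkIDs 7..9 (the
// m-mapped chunks) and position 3 maps to HeadChunkID 10, the head chunk.
// Going the other way, chunk(id) computes ix = id - firstChunkID and treats
// ix == len(mmappedChunks) as the head chunk; anything below firstChunkID has
// already been truncated away. The OOO variants work the same way against
// firstOOOChunkID and oooMmappedChunks.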
-func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper) (chunk *memChunk, garbageCollect bool, err error) { +func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk *memChunk, garbageCollect bool, err error) { // ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix @@ -321,32 +334,287 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi } return nil, false, err } - mc := s.memChunkPool.Get().(*memChunk) + mc := memChunkPool.Get().(*memChunk) mc.chunk = chk mc.minTime = s.mmappedChunks[ix].minTime mc.maxTime = s.mmappedChunks[ix].maxTime return mc, true, nil } +// oooMergedChunk returns the requested chunk based on the given chunks.Meta +// reference from memory or by m-mapping it from the disk. The returned chunk +// might be a merge of all the overlapping chunks, if any, amongst all the +// chunks in the OOOHead. +// This function is not thread safe unless the caller holds a lock. +func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) { + _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() + + // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are + // incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index. + // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix + // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. + ix := int(cid) - int(s.firstOOOChunkID) + if ix < 0 || ix > len(s.oooMmappedChunks) { + return nil, storage.ErrNotFound + } + + if ix == len(s.oooMmappedChunks) { + if s.oooHeadChunk == nil { + return nil, errors.New("invalid ooo head chunk") + } + } + + // We create a temporary slice of chunk metas to hold the information of all + // possible chunks that may overlap with the requested chunk. + tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.oooMmappedChunks)) + + oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks)))) + if s.oooHeadChunk != nil && s.oooHeadChunk.OverlapsClosedInterval(mint, maxt) { + // We only want to append the head chunk if this chunk existed when + // Series() was called. This brings consistency in case new data + // is added in between Series() and Chunk() calls. + if oooHeadRef == meta.OOOLastRef { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: chunks.Meta{ + // Ignoring samples added before and after the last known min and max time for this chunk. + MinTime: meta.OOOLastMinTime, + MaxTime: meta.OOOLastMaxTime, + Ref: oooHeadRef, + }, + }) + } + } + + for i, c := range s.oooMmappedChunks { + chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) + // We can skip chunks that came in later than the last known OOOLastRef. 
+ if chunkRef > meta.OOOLastRef { + break + } + + if chunkRef == meta.OOOLastRef { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: chunks.Meta{ + MinTime: meta.OOOLastMinTime, + MaxTime: meta.OOOLastMaxTime, + Ref: chunkRef, + }, + ref: c.ref, + origMinT: c.minTime, + origMaxT: c.maxTime, + }) + } else if c.OverlapsClosedInterval(mint, maxt) { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: chunks.Meta{ + MinTime: c.minTime, + MaxTime: c.maxTime, + Ref: chunkRef, + }, + ref: c.ref, + }) + } + } + + // Next we want to sort all the collected chunks by min time so we can find + // those that overlap and stop when we know the rest don't. + sort.Sort(byMinTimeAndMinRef(tmpChks)) + + mc := &mergedOOOChunks{} + absoluteMax := int64(math.MinInt64) + for _, c := range tmpChks { + if c.meta.Ref != meta.Ref && (len(mc.chunks) == 0 || c.meta.MinTime > absoluteMax) { + continue + } + if c.meta.Ref == oooHeadRef { + var xor *chunkenc.XORChunk + // If head chunk min and max time match the meta OOO markers + // that means that the chunk has not expanded so we can append + // it as it is. + if s.oooHeadChunk.minTime == meta.OOOLastMinTime && s.oooHeadChunk.maxTime == meta.OOOLastMaxTime { + xor, err = s.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called. + } else { + // We need to remove samples that are outside of the markers + xor, err = s.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime) + } + if err != nil { + return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") + } + c.meta.Chunk = xor + } else { + chk, err := cdm.Chunk(c.ref) + if err != nil { + if _, ok := err.(*chunks.CorruptionErr); ok { + return nil, errors.Wrap(err, "invalid ooo mmapped chunk") + } + return nil, err + } + if c.meta.Ref == meta.OOOLastRef && + (c.origMinT != meta.OOOLastMinTime || c.origMaxT != meta.OOOLastMaxTime) { + // The head expanded and was memory mapped so now we need to + // wrap the chunk within a chunk that doesnt allows us to iterate + // through samples out of the OOOLastMinT and OOOLastMaxT + // markers. + c.meta.Chunk = boundedChunk{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime} + } else { + c.meta.Chunk = chk + } + } + mc.chunks = append(mc.chunks, c.meta) + if c.meta.MaxTime > absoluteMax { + absoluteMax = c.meta.MaxTime + } + } + + return mc, nil +} + +var _ chunkenc.Chunk = &mergedOOOChunks{} + +// mergedOOOChunks holds the list of overlapping chunks. This struct satisfies +// chunkenc.Chunk. +type mergedOOOChunks struct { + chunks []chunks.Meta +} + +// Bytes is a very expensive method because its calling the iterator of all the +// chunks in the mergedOOOChunk and building a new chunk with the samples. 
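// The merged chunk assembled above is read through a chained sample iterator
// that interleaves the overlapping chunks by timestamp. A minimal standalone
// sketch of that k-way merge over plain, already-sorted slices (illustrative
// only; the vendored code uses storage.NewChainSampleIterator over real chunk
// iterators):
type mergeSample struct {
	t int64
	v float64
}

func mergeSorted(inputs ...[]mergeSample) []mergeSample {
	var out []mergeSample
	idx := make([]int, len(inputs))
	for {
		best := -1
		var bestT int64
		for i, in := range inputs {
			if idx[i] >= len(in) {
				continue // this input is exhausted
			}
			if t := in[idx[i]].t; best == -1 || t < bestT {
				best, bestT = i, t
			}
		}
		if best == -1 {
			return out // all inputs exhausted
		}
		s := inputs[best][idx[best]]
		idx[best]++
		if n := len(out); n > 0 && out[n-1].t == s.t {
			out[n-1] = s // collapse duplicate timestamps to a single sample
			continue
		}
		out = append(out, s)
	}
}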
+func (o mergedOOOChunks) Bytes() []byte { + xc := chunkenc.NewXORChunk() + app, err := xc.Appender() + if err != nil { + panic(err) + } + it := o.Iterator(nil) + for it.Next() { + t, v := it.At() + app.Append(t, v) + } + + return xc.Bytes() +} + +func (o mergedOOOChunks) Encoding() chunkenc.Encoding { + return chunkenc.EncXOR +} + +func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) { + return nil, errors.New("can't append to mergedOOOChunks") +} + +func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { + iterators := make([]chunkenc.Iterator, 0, len(o.chunks)) + for _, c := range o.chunks { + iterators = append(iterators, c.Chunk.Iterator(nil)) + } + return storage.NewChainSampleIterator(iterators) +} + +func (o mergedOOOChunks) NumSamples() int { + samples := 0 + for _, c := range o.chunks { + samples += c.Chunk.NumSamples() + } + return samples +} + +func (o mergedOOOChunks) Compact() {} + +var _ chunkenc.Chunk = &boundedChunk{} + +// boundedChunk is an implementation of chunkenc.Chunk that uses a +// boundedIterator that only iterates through samples which timestamps are +// >= minT and <= maxT +type boundedChunk struct { + chunkenc.Chunk + minT int64 + maxT int64 +} + +func (b boundedChunk) Bytes() []byte { + xor := chunkenc.NewXORChunk() + a, _ := xor.Appender() + it := b.Iterator(nil) + for it.Next() { + t, v := it.At() + a.Append(t, v) + } + return xor.Bytes() +} + +func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { + it := b.Chunk.Iterator(iterator) + if it == nil { + panic("iterator shouldn't be nil") + } + return boundedIterator{it, b.minT, b.maxT} +} + +var _ chunkenc.Iterator = &boundedIterator{} + +// boundedIterator is an implementation of Iterator that only iterates through +// samples which timestamps are >= minT and <= maxT +type boundedIterator struct { + chunkenc.Iterator + minT int64 + maxT int64 +} + +// Next the first time its called it will advance as many positions as necessary +// until its able to find a sample within the bounds minT and maxT. +// If there are samples within bounds it will advance one by one amongst them. +// If there are no samples within bounds it will return false. +func (b boundedIterator) Next() bool { + for b.Iterator.Next() { + t, _ := b.Iterator.At() + if t < b.minT { + continue + } else if t > b.maxT { + return false + } + return true + } + return false +} + +func (b boundedIterator) Seek(t int64) bool { + if t < b.minT { + // We must seek at least up to b.minT if it is asked for something before that. + ok := b.Iterator.Seek(b.minT) + if !ok { + return false + } + t, _ := b.Iterator.At() + return t <= b.maxT + } + if t > b.maxT { + // We seek anyway so that the subsequent Next() calls will also return false. + b.Iterator.Seek(t) + return false + } + return b.Iterator.Seek(t) +} + +// safeChunk makes sure that the chunk can be accessed without a race condition type safeChunk struct { chunkenc.Chunk s *memSeries cid chunks.HeadChunkID isoState *isolationState chunkDiskMapper *chunks.ChunkDiskMapper + memChunkPool *sync.Pool } func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { c.s.Lock() - it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, reuseIter) + it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, c.memChunkPool, reuseIter) c.s.Unlock() return it } // iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range. // It is unsafe to call this concurrently with s.append(...) 
without holding the series lock. -func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, it chunkenc.Iterator) chunkenc.Iterator { - c, garbageCollect, err := s.chunk(id, chunkDiskMapper) +func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool, it chunkenc.Iterator) chunkenc.Iterator { + c, garbageCollect, err := s.chunk(id, chunkDiskMapper, memChunkPool) // TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a // series's chunk, which got then garbage collected before it got // accessed. We must ensure to not garbage collect as long as any @@ -359,7 +627,7 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch // Set this to nil so that Go GC can collect it after it has been used. // This should be done always at the end. c.chunk = nil - s.memChunkPool.Put(c) + memChunkPool.Put(c) } }() @@ -409,87 +677,25 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch if stopAfter == 0 { return chunkenc.NewNopIterator() } - - if int(id)-int(s.firstChunkID) < len(s.mmappedChunks) { - if stopAfter == numSamples { - return c.chunk.Iterator(it) - } - if msIter, ok := it.(*stopIterator); ok { - msIter.Iterator = c.chunk.Iterator(msIter.Iterator) - msIter.i = -1 - msIter.stopAfter = stopAfter - return msIter - } - return &stopIterator{ - Iterator: c.chunk.Iterator(it), - i: -1, - stopAfter: stopAfter, - } - } - // Serve the last 4 samples for the last chunk from the sample buffer - // as their compressed bytes may be mutated by added samples. - if msIter, ok := it.(*memSafeIterator); ok { - msIter.Iterator = c.chunk.Iterator(msIter.Iterator) - msIter.i = -1 - msIter.total = numSamples - msIter.stopAfter = stopAfter - msIter.buf = s.sampleBuf - return msIter + if stopAfter == numSamples { + return c.chunk.Iterator(it) } - return &memSafeIterator{ - stopIterator: stopIterator{ - Iterator: c.chunk.Iterator(it), - i: -1, - stopAfter: stopAfter, - }, - total: numSamples, - buf: s.sampleBuf, - } -} - -// memSafeIterator returns values from the wrapped stopIterator -// except the last 4, which come from buf. -type memSafeIterator struct { - stopIterator - - total int - buf [4]sample + return makeStopIterator(c.chunk, it, stopAfter) } -func (it *memSafeIterator) Seek(t int64) bool { - if it.Err() != nil { - return false +func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator { + // Re-use the Iterator object if it is a stopIterator. 
+ if stopIter, ok := it.(*stopIterator); ok { + stopIter.Iterator = c.Iterator(stopIter.Iterator) + stopIter.i = -1 + stopIter.stopAfter = stopAfter + return stopIter } - - ts, _ := it.At() - - for t > ts || it.i == -1 { - if !it.Next() { - return false - } - ts, _ = it.At() - } - - return true -} - -func (it *memSafeIterator) Next() bool { - if it.i+1 >= it.stopAfter { - return false - } - it.i++ - if it.total-it.i > 4 { - return it.Iterator.Next() - } - return true -} - -func (it *memSafeIterator) At() (int64, float64) { - if it.total-it.i > 4 { - return it.Iterator.At() + return &stopIterator{ + Iterator: c.Iterator(it), + i: -1, + stopAfter: stopAfter, } - s := it.buf[4-(it.total-it.i)] - return s.t, s.v } // stopIterator wraps an Iterator, but only returns the first diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index a8232ff7d34..0a5ee05995a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -30,6 +30,7 @@ import ( "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" @@ -41,13 +42,14 @@ import ( "github.com/prometheus/prometheus/tsdb/wal" ) -func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { +func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { // Track number of samples that referenced a series we don't know about // for error reporting. var unknownRefs atomic.Uint64 var unknownExemplarRefs atomic.Uint64 + var unknownMetadataRefs atomic.Uint64 // Track number of series records that had overlapping m-map chunks. - var mmapOverlappingChunks uint64 + var mmapOverlappingChunks atomic.Uint64 // Start workers that each process samples for a partition of the series ID space. var ( @@ -81,6 +83,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return []record.RefExemplar{} }, } + metadataPool = sync.Pool{ + New: func() interface{} { + return []record.RefMetadata{} + }, + } ) defer func() { @@ -100,8 +107,9 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H processors[i].setup() go func(wp *walSubsetProcessor) { - unknown := wp.processWALSamples(h) + unknown, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks) unknownRefs.Add(unknown) + mmapOverlappingChunks.Add(overlapping) wg.Done() }(&processors[i]) } @@ -184,6 +192,18 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return } decoded <- exemplars + case record.Metadata: + meta := metadataPool.Get().([]record.RefMetadata)[:0] + meta, err := dec.Metadata(rec, meta) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode metadata"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- meta default: // Noop. 
} @@ -205,61 +225,18 @@ Outer: if chunks.HeadSeriesRef(h.lastSeriesID.Load()) < walSeries.Ref { h.lastSeriesID.Store(uint64(walSeries.Ref)) } - - idx := uint64(mSeries.ref) % uint64(n) - // It is possible that some old sample is being processed in processWALSamples that - // could cause race below. So we wait for the goroutine to empty input the buffer and finish - // processing all old samples after emptying the buffer. - processors[idx].waitUntilIdle() - // Lock the subset so we can modify the series object - processors[idx].mx.Lock() - - mmc := mmappedChunks[walSeries.Ref] - - if created { - // This is the first WAL series record for this series. - h.resetSeriesWithMMappedChunks(mSeries, mmc) - processors[idx].mx.Unlock() - continue + if !created { + multiRef[walSeries.Ref] = mSeries.ref } - // There's already a different ref for this series. - // A duplicate series record is only possible when the old samples were already compacted into a block. - // Hence we can discard all the samples and m-mapped chunks replayed till now for this series. - - multiRef[walSeries.Ref] = mSeries.ref - - // Checking if the new m-mapped chunks overlap with the already existing ones. - if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 { - if overlapsClosedInterval( - mSeries.mmappedChunks[0].minTime, - mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime, - mmc[0].minTime, - mmc[len(mmc)-1].maxTime, - ) { - mmapOverlappingChunks++ - level.Debug(h.logger).Log( - "msg", "M-mapped chunks overlap on a duplicate series record", - "series", mSeries.lset.String(), - "oldref", mSeries.ref, - "oldmint", mSeries.mmappedChunks[0].minTime, - "oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime, - "newref", walSeries.Ref, - "newmint", mmc[0].minTime, - "newmaxt", mmc[len(mmc)-1].maxTime, - ) - } - } - - // Replacing m-mapped chunks with the new ones (could be empty). - h.resetSeriesWithMMappedChunks(mSeries, mmc) - - processors[idx].mx.Unlock() + idx := uint64(mSeries.ref) % uint64(n) + processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries} } //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. seriesPool.Put(v) case []record.RefSample: samples := v + minValidTime := h.minValidTime.Load() // We split up the samples into chunks of 5000 samples or less. // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise // cause thousands of very large in flight buffers occupying large amounts @@ -270,9 +247,14 @@ Outer: m = len(samples) } for i := 0; i < n; i++ { - shards[i] = processors[i].reuseBuf() + if shards[i] == nil { + shards[i] = processors[i].reuseBuf() + } } for _, sam := range samples[:m] { + if sam.T < minValidTime { + continue // Before minValidTime: discard. + } if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } @@ -280,7 +262,10 @@ Outer: shards[mod] = append(shards[mod], sam) } for i := 0; i < n; i++ { - processors[i].input <- shards[i] + if len(shards[i]) > 0 { + processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]} + shards[i] = nil + } } samples = samples[m:] } @@ -307,6 +292,21 @@ Outer: } //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. 
exemplarsPool.Put(v) + case []record.RefMetadata: + for _, m := range v { + s := h.series.getByID(chunks.HeadSeriesRef(m.Ref)) + if s == nil { + unknownMetadataRefs.Inc() + continue + } + s.meta = &metadata.Metadata{ + Type: record.ToTextparseMetricType(m.Type), + Unit: m.Unit, + Help: m.Help, + } + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + metadataPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } @@ -333,21 +333,46 @@ Outer: return errors.Wrap(r.Err(), "read records") } - if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load()) + if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 || unknownMetadataRefs.Load() > 0 { + level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "metadata", unknownMetadataRefs.Load()) } - if mmapOverlappingChunks > 0 { - level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", mmapOverlappingChunks) + if count := mmapOverlappingChunks.Load(); count > 0 { + level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count) } return nil } // resetSeriesWithMMappedChunks is only used during the WAL replay. -func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc []*mmappedChunk) { - h.metrics.chunksCreated.Add(float64(len(mmc))) +func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) { + if mSeries.ref != walSeriesRef { + // Checking if the new m-mapped chunks overlap with the already existing ones. + if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 { + if overlapsClosedInterval( + mSeries.mmappedChunks[0].minTime, + mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime, + mmc[0].minTime, + mmc[len(mmc)-1].maxTime, + ) { + level.Debug(h.logger).Log( + "msg", "M-mapped chunks overlap on a duplicate series record", + "series", mSeries.lset.String(), + "oldref", mSeries.ref, + "oldmint", mSeries.mmappedChunks[0].minTime, + "oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime, + "newref", walSeriesRef, + "newmint", mmc[0].minTime, + "newmaxt", mmc[len(mmc)-1].maxTime, + ) + overlapped = true + } + } + } + + h.metrics.chunksCreated.Add(float64(len(mmc) + len(oooMmc))) h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks))) - h.metrics.chunks.Add(float64(len(mmc) - len(mSeries.mmappedChunks))) + h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks))) mSeries.mmappedChunks = mmc + mSeries.oooMmappedChunks = oooMmc // Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject. if len(mmc) == 0 { mSeries.mmMaxTime = math.MinInt64 @@ -355,22 +380,41 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc []*mmappedCh mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime) } + if len(oooMmc) != 0 { + // Mint and maxt can be in any chunk, they are not sorted. + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + for _, ch := range oooMmc { + if ch.minTime < mint { + mint = ch.minTime + } + if ch.maxTime > maxt { + maxt = ch.maxTime + } + } + h.updateMinOOOMaxOOOTime(mint, maxt) + } // Any samples replayed till now would already be compacted. 
Resetting the head chunk. mSeries.nextAt = 0 mSeries.headChunk = nil mSeries.app = nil + return } type walSubsetProcessor struct { - mx sync.Mutex // Take this lock while modifying series in the subset. - input chan []record.RefSample + input chan walSubsetProcessorInputItem output chan []record.RefSample } +type walSubsetProcessorInputItem struct { + samples []record.RefSample + existingSeries *memSeries + walSeriesRef chunks.HeadSeriesRef +} + func (wp *walSubsetProcessor) setup() { wp.output = make(chan []record.RefSample, 300) - wp.input = make(chan []record.RefSample, 300) + wp.input = make(chan walSubsetProcessorInputItem, 300) } func (wp *walSubsetProcessor) closeAndDrain() { @@ -391,19 +435,23 @@ func (wp *walSubsetProcessor) reuseBuf() []record.RefSample { // processWALSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. -// Samples before the minValidTime timestamp are discarded. -func (wp *walSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) { +func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, mmapOverlappingChunks uint64) { defer close(wp.output) - minValidTime := h.minValidTime.Load() mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) - - for samples := range wp.input { - wp.mx.Lock() - for _, s := range samples { - if s.T < minValidTime { - continue + chunkRange := h.chunkRange.Load() + + for in := range wp.input { + if in.existingSeries != nil { + mmc := mmappedChunks[in.walSeriesRef] + oooMmc := oooMmappedChunks[in.walSeriesRef] + if h.resetSeriesWithMMappedChunks(in.existingSeries, mmc, oooMmc, in.walSeriesRef) { + mmapOverlappingChunks++ } + continue + } + + for _, s := range in.samples { ms := h.series.getByID(s.Ref) if ms == nil { unknownRefs++ @@ -412,7 +460,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) { if s.T <= ms.mmMaxTime { continue } - if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper); chunkCreated { + if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } @@ -423,15 +471,289 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) { mint = s.T } } + select { + case wp.output <- in.samples: + default: + } + } + h.updateMinMaxTime(mint, maxt) + + return unknownRefs, mmapOverlappingChunks +} + +func (h *Head) loadWBL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { + // Track number of samples, m-map markers, that referenced a series we don't know about + // for error reporting. + var unknownRefs, mmapMarkerUnknownRefs atomic.Uint64 + + lastSeq, lastOff := lastMmapRef.Unpack() + // Start workers that each process samples for a partition of the series ID space. + var ( + wg sync.WaitGroup + n = runtime.GOMAXPROCS(0) + processors = make([]wblSubsetProcessor, n) + + dec record.Decoder + shards = make([][]record.RefSample, n) + + decodedCh = make(chan interface{}, 10) + decodeErr error + samplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + markersPool = sync.Pool{ + New: func() interface{} { + return []record.RefMmapMarker{} + }, + } + ) + + defer func() { + // For CorruptionErr ensure to terminate all workers before exiting. + // We also wrap it to identify OOO WBL corruption. 
+ _, ok := err.(*wal.CorruptionErr) + if ok { + err = &errLoadWbl{err: err} + for i := 0; i < n; i++ { + processors[i].closeAndDrain() + } + wg.Wait() + } + }() + + wg.Add(n) + for i := 0; i < n; i++ { + processors[i].setup() + + go func(wp *wblSubsetProcessor) { + unknown := wp.processWBLSamples(h) + unknownRefs.Add(unknown) + wg.Done() + }(&processors[i]) + } + + go func() { + defer close(decodedCh) + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, err = dec.Samples(rec, samples) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decodedCh <- samples + case record.MmapMarkers: + markers := markersPool.Get().([]record.RefMmapMarker)[:0] + markers, err = dec.MmapMarkers(rec, markers) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode mmap markers"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decodedCh <- markers + default: + // Noop. + } + } + }() + + // The records are always replayed from the oldest to the newest. + for d := range decodedCh { + switch v := d.(type) { + case []record.RefSample: + samples := v + // We split up the samples into parts of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + for i := 0; i < n; i++ { + shards[i] = processors[i].reuseBuf() + } + for _, sam := range samples[:m] { + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } + mod := uint64(sam.Ref) % uint64(n) + shards[mod] = append(shards[mod], sam) + } + for i := 0; i < n; i++ { + processors[i].input <- shards[i] + } + samples = samples[m:] + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + samplesPool.Put(d) + case []record.RefMmapMarker: + markers := v + for _, rm := range markers { + seq, off := rm.MmapRef.Unpack() + if seq > lastSeq || (seq == lastSeq && off > lastOff) { + // This m-map chunk from markers was not present during + // the load of mmapped chunks that happened in the head + // initialization. + continue + } + + if r, ok := multiRef[rm.Ref]; ok { + rm.Ref = r + } + + ms := h.series.getByID(rm.Ref) + if ms == nil { + mmapMarkerUnknownRefs.Inc() + continue + } + idx := uint64(ms.ref) % uint64(n) + // It is possible that some old sample is being processed in processWALSamples that + // could cause race below. So we wait for the goroutine to empty input the buffer and finish + // processing all old samples after emptying the buffer. + processors[idx].waitUntilIdle() + // Lock the subset so we can modify the series object + processors[idx].mx.Lock() + + // All samples till now have been m-mapped. Hence clear out the headChunk. + // In case some samples slipped through and went into m-map chunks because of changed + // chunk size parameters, we are not taking care of that here. + // TODO(codesome): see if there is a way to avoid duplicate m-map chunks if + // the size of ooo chunk was reduced between restart. 
+ ms.oooHeadChunk = nil + + processors[idx].mx.Unlock() + } + default: + panic(fmt.Errorf("unexpected decodedCh type: %T", d)) + } + } + + if decodeErr != nil { + return decodeErr + } + + // Signal termination to each worker and wait for it to close its output channel. + for i := 0; i < n; i++ { + processors[i].closeAndDrain() + } + wg.Wait() + + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + + if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { + level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) + } + return nil +} + +type errLoadWbl struct { + err error +} + +func (e errLoadWbl) Error() string { + return e.err.Error() +} + +// To support errors.Cause(). +func (e errLoadWbl) Cause() error { + return e.err +} + +// To support errors.Unwrap(). +func (e errLoadWbl) Unwrap() error { + return e.err +} + +// isErrLoadOOOWal returns a boolean if the error is errLoadWbl. +func isErrLoadOOOWal(err error) bool { + _, ok := err.(*errLoadWbl) + return ok +} + +type wblSubsetProcessor struct { + mx sync.Mutex // Take this lock while modifying series in the subset. + input chan []record.RefSample + output chan []record.RefSample +} + +func (wp *wblSubsetProcessor) setup() { + wp.output = make(chan []record.RefSample, 300) + wp.input = make(chan []record.RefSample, 300) +} + +func (wp *wblSubsetProcessor) closeAndDrain() { + close(wp.input) + for range wp.output { + } +} + +// If there is a buffer in the output chan, return it for reuse, otherwise return nil. +func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample { + select { + case buf := <-wp.output: + return buf[:0] + default: + } + return nil +} + +// processWBLSamples adds the samples it receives to the head and passes +// the buffer received to an output channel for reuse. +// Samples before the minValidTime timestamp are discarded. +func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { + defer close(wp.output) + + oooCapMax := h.opts.OutOfOrderCapMax.Load() + // We don't check for minValidTime for ooo samples. + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + for samples := range wp.input { + wp.mx.Lock() + for _, s := range samples { + ms := h.series.getByID(s.Ref) + if ms == nil { + unknownRefs++ + continue + } + ok, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper, oooCapMax) + if chunkCreated { + h.metrics.chunksCreated.Inc() + h.metrics.chunks.Inc() + } + if ok { + if s.T < mint { + mint = s.T + } + if s.T > maxt { + maxt = s.T + } + } + } wp.mx.Unlock() wp.output <- samples } - h.updateMinMaxTime(mint, maxt) + + h.updateMinOOOMaxOOOTime(mint, maxt) return unknownRefs } -func (wp *walSubsetProcessor) waitUntilIdle() { +func (wp *wblSubsetProcessor) waitUntilIdle() { select { case <-wp.output: // Allow output side to drain to avoid deadlock. 
default: @@ -453,11 +775,10 @@ const ( ) type chunkSnapshotRecord struct { - ref chunks.HeadSeriesRef - lset labels.Labels - chunkRange int64 - mc *memChunk - sampleBuf [4]sample + ref chunks.HeadSeriesRef + lset labels.Labels + mc *memChunk + lastValue float64 } func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { @@ -465,12 +786,8 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { buf.PutByte(chunkSnapshotRecordTypeSeries) buf.PutBE64(uint64(s.ref)) - buf.PutUvarint(len(s.lset)) - for _, l := range s.lset { - buf.PutUvarintStr(l.Name) - buf.PutUvarintStr(l.Value) - } - buf.PutBE64int64(s.chunkRange) + record.EncodeLabels(&buf, s.lset) + buf.PutBE64int64(0) // Backwards-compatibility; was chunkRange but now unused. s.Lock() if s.headChunk == nil { @@ -481,18 +798,20 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { buf.PutBE64int64(s.headChunk.maxTime) buf.PutByte(byte(s.headChunk.chunk.Encoding())) buf.PutUvarintBytes(s.headChunk.chunk.Bytes()) - // Put the sample buf. - for _, smpl := range s.sampleBuf { - buf.PutBE64int64(smpl.t) - buf.PutBEFloat64(smpl.v) + // Backwards compatibility for old sampleBuf which had last 4 samples. + for i := 0; i < 3; i++ { + buf.PutBE64int64(0) + buf.PutBEFloat64(0) } + buf.PutBE64int64(0) + buf.PutBEFloat64(s.lastValue) } s.Unlock() return buf.Get() } -func decodeSeriesFromChunkSnapshot(b []byte) (csr chunkSnapshotRecord, err error) { +func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapshotRecord, err error) { dec := encoding.Decbuf{B: b} if flag := dec.Byte(); flag != chunkSnapshotRecordTypeSeries { @@ -500,15 +819,11 @@ func decodeSeriesFromChunkSnapshot(b []byte) (csr chunkSnapshotRecord, err error } csr.ref = chunks.HeadSeriesRef(dec.Be64()) - // The label set written to the disk is already sorted. - csr.lset = make(labels.Labels, dec.Uvarint()) - for i := range csr.lset { - csr.lset[i].Name = dec.UvarintStr() - csr.lset[i].Value = dec.UvarintStr() - } + // TODO: figure out why DecodeLabels calls Sort(), and perhaps remove it. + csr.lset = d.DecodeLabels(&dec) - csr.chunkRange = dec.Be64int64() + _ = dec.Be64int64() // Was chunkRange but now unused. if dec.Uvarint() == 0 { return } @@ -529,10 +844,13 @@ func decodeSeriesFromChunkSnapshot(b []byte) (csr chunkSnapshotRecord, err error } csr.mc.chunk = chk - for i := range csr.sampleBuf { - csr.sampleBuf[i].t = dec.Be64int64() - csr.sampleBuf[i].v = dec.Be64Float64() + // Backwards-compatibility for old sampleBuf which had last 4 samples. + for i := 0; i < 3; i++ { + _ = dec.Be64int64() + _ = dec.Be64Float64() } + _ = dec.Be64int64() + csr.lastValue = dec.Be64Float64() err = dec.Err() if err != nil && len(dec.B) > 0 { @@ -896,20 +1214,20 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie return } localRefSeries[csr.ref] = series - if chunks.HeadSeriesRef(h.lastSeriesID.Load()) < series.ref { - h.lastSeriesID.Store(uint64(series.ref)) + for { + seriesID := uint64(series.ref) + lastSeriesID := h.lastSeriesID.Load() + if lastSeriesID >= seriesID || h.lastSeriesID.CompareAndSwap(lastSeriesID, seriesID) { + break + } } - series.chunkRange = csr.chunkRange if csr.mc == nil { continue } series.nextAt = csr.mc.maxTime // This will create a new chunk on append. 
series.headChunk = csr.mc - for i := range series.sampleBuf { - series.sampleBuf[i].t = csr.sampleBuf[i].t - series.sampleBuf[i].v = csr.sampleBuf[i].v - } + series.lastValue = csr.lastValue app, err := series.headChunk.chunk.Appender() if err != nil { @@ -938,7 +1256,7 @@ Outer: switch rec[0] { case chunkSnapshotRecordTypeSeries: numSeries++ - csr, err := decodeSeriesFromChunkSnapshot(rec) + csr, err := decodeSeriesFromChunkSnapshot(&dec, rec) if err != nil { loopErr = errors.Wrap(err, "decode series record") break Outer diff --git a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go index 4919bfe9156..74d63c6af04 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go @@ -14,6 +14,7 @@ package tsdb import ( + "math" "sync" ) @@ -45,6 +46,7 @@ func (i *isolationState) IsolationDisabled() bool { type isolationAppender struct { appendID uint64 + minTime int64 prev *isolationAppender next *isolationAppender } @@ -116,13 +118,29 @@ func (i *isolation) lowWatermarkLocked() uint64 { return i.appendsOpenList.next.appendID } +// lowestAppendTime returns the lowest minTime for any open appender, +// or math.MaxInt64 if no open appenders. +func (i *isolation) lowestAppendTime() int64 { + var lowest int64 = math.MaxInt64 + i.appendMtx.RLock() + defer i.appendMtx.RUnlock() + + for a := i.appendsOpenList.next; a != i.appendsOpenList; a = a.next { + if lowest > a.minTime { + lowest = a.minTime + } + } + return lowest +} + // State returns an object used to control isolation // between a query and appends. Must be closed when complete. func (i *isolation) State(mint, maxt int64) *isolationState { i.appendMtx.RLock() // Take append mutex before read mutex. defer i.appendMtx.RUnlock() - // We need to track the reads even when isolation is disabled. + // We need to track reads even when isolation is disabled, so that head + // truncation can wait till reads overlapping that range have finished. isoState := &isolationState{ maxAppendID: i.appendsOpenList.appendID, lowWatermark: i.appendsOpenList.next.appendID, // Lowest appendID from appenders, or lastAppendId. @@ -163,7 +181,7 @@ func (i *isolation) TraverseOpenReads(f func(s *isolationState) bool) { // newAppendID increments the transaction counter and returns a new transaction // ID. The first ID returned is 1. // Also returns the low watermark, to keep lock/unlock operations down. -func (i *isolation) newAppendID() (uint64, uint64) { +func (i *isolation) newAppendID(minTime int64) (uint64, uint64) { if i.disabled { return 0, 0 } @@ -176,6 +194,7 @@ func (i *isolation) newAppendID() (uint64, uint64) { app := i.appendersPool.Get().(*isolationAppender) app.appendID = i.appendsOpenList.appendID + app.minTime = minTime app.prev = i.appendsOpenList.prev app.next = i.appendsOpenList diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go new file mode 100644 index 00000000000..3af60399122 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go @@ -0,0 +1,159 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "fmt" + "sort" + + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tombstones" +) + +// OOOChunk maintains samples in time-ascending order. +// Inserts for timestamps already seen, are dropped. +// Samples are stored uncompressed to allow easy sorting. +// Perhaps we can be more efficient later. +type OOOChunk struct { + samples []sample +} + +func NewOOOChunk() *OOOChunk { + return &OOOChunk{samples: make([]sample, 0, 4)} +} + +// Insert inserts the sample such that order is maintained. +// Returns false if insert was not possible due to the same timestamp already existing. +func (o *OOOChunk) Insert(t int64, v float64) bool { + // Find index of sample we should replace. + i := sort.Search(len(o.samples), func(i int) bool { return o.samples[i].t >= t }) + + if i >= len(o.samples) { + // none found. append it at the end + o.samples = append(o.samples, sample{t, v}) + return true + } + + if o.samples[i].t == t { + return false + } + + // Expand length by 1 to make room. use a zero sample, we will overwrite it anyway. + o.samples = append(o.samples, sample{}) + copy(o.samples[i+1:], o.samples[i:]) + o.samples[i] = sample{t, v} + + return true +} + +func (o *OOOChunk) NumSamples() int { + return len(o.samples) +} + +func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) { + x := chunkenc.NewXORChunk() + app, err := x.Appender() + if err != nil { + return nil, err + } + for _, s := range o.samples { + app.Append(s.t, s.v) + } + return x, nil +} + +func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, error) { + x := chunkenc.NewXORChunk() + app, err := x.Appender() + if err != nil { + return nil, err + } + for _, s := range o.samples { + if s.t < mint { + continue + } + if s.t > maxt { + break + } + app.Append(s.t, s.v) + } + return x, nil +} + +var _ BlockReader = &OOORangeHead{} + +// OOORangeHead allows querying Head out of order samples via BlockReader +// interface implementation. +type OOORangeHead struct { + head *Head + // mint and maxt are tracked because when a query is handled we only want + // the timerange of the query and having preexisting pointers to the first + // and last timestamp help with that. + mint, maxt int64 +} + +func NewOOORangeHead(head *Head, mint, maxt int64) *OOORangeHead { + return &OOORangeHead{ + head: head, + mint: mint, + maxt: maxt, + } +} + +func (oh *OOORangeHead) Index() (IndexReader, error) { + return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt), nil +} + +func (oh *OOORangeHead) Chunks() (ChunkReader, error) { + return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt), nil +} + +func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { + // As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing + // Tombstones are not supported for out of order metrics. 
+ return tombstones.NewMemTombstones(), nil +} + +func (oh *OOORangeHead) Meta() BlockMeta { + var id [16]byte + copy(id[:], "____ooo_head____") + return BlockMeta{ + MinTime: oh.mint, + MaxTime: oh.maxt, + ULID: id, + Stats: BlockStats{ + NumSeries: oh.head.NumSeries(), + }, + } +} + +// Size returns the size taken by the Head block. +func (oh *OOORangeHead) Size() int64 { + return oh.head.Size() +} + +// String returns an human readable representation of the out of order range +// head. It's important to keep this function in order to avoid the struct dump +// when the head is stringified in errors or logs. +func (oh *OOORangeHead) String() string { + return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime()) +} + +func (oh *OOORangeHead) MinTime() int64 { + return oh.mint +} + +func (oh *OOORangeHead) MaxTime() int64 { + return oh.maxt +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go new file mode 100644 index 00000000000..f63607dc9c9 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -0,0 +1,433 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "errors" + "math" + "sort" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/tombstones" +) + +var _ IndexReader = &OOOHeadIndexReader{} + +// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be +// accessed. +// It also has a reference to headIndexReader so we can leverage on its +// IndexReader implementation for all the methods that remain the same. We +// decided to do this to avoid code duplication. +// The only methods that change are the ones about getting Series and Postings. +type OOOHeadIndexReader struct { + *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. +} + +func NewOOOHeadIndexReader(head *Head, mint, maxt int64) *OOOHeadIndexReader { + hr := &headIndexReader{ + head: head, + mint: mint, + maxt: maxt, + } + return &OOOHeadIndexReader{hr} +} + +func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error { + return oh.series(ref, lbls, chks, 0) +} + +// The passed lastMmapRef tells upto what max m-map chunk that we can consider. +// If it is 0, it means all chunks need to be considered. +// If it is non-0, then the oooHeadChunk must not be considered. 
+func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error { + s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) + + if s == nil { + oh.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + *lbls = append((*lbls)[:0], s.lset...) + + if chks == nil { + return nil + } + + s.Lock() + defer s.Unlock() + *chks = (*chks)[:0] + + tmpChks := make([]chunks.Meta, 0, len(s.oooMmappedChunks)) + + // We define these markers to track the last chunk reference while we + // fill the chunk meta. + // These markers are useful to give consistent responses to repeated queries + // even if new chunks that might be overlapping or not are added afterwards. + // Also, lastMinT and lastMaxT are initialized to the max int as a sentinel + // value to know they are unset. + var lastChunkRef chunks.ChunkRef + lastMinT, lastMaxT := int64(math.MaxInt64), int64(math.MaxInt64) + + addChunk := func(minT, maxT int64, ref chunks.ChunkRef) { + // the first time we get called is for the last included chunk. + // set the markers accordingly + if lastMinT == int64(math.MaxInt64) { + lastChunkRef = ref + lastMinT = minT + lastMaxT = maxT + } + + tmpChks = append(tmpChks, chunks.Meta{ + MinTime: minT, + MaxTime: maxT, + Ref: ref, + OOOLastRef: lastChunkRef, + OOOLastMinTime: lastMinT, + OOOLastMaxTime: lastMaxT, + }) + } + + // Collect all chunks that overlap the query range, in order from most recent to most old, + // so we can set the correct markers. + if s.oooHeadChunk != nil { + c := s.oooHeadChunk + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && lastMmapRef == 0 { + ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks)))) + addChunk(c.minTime, c.maxTime, ref) + } + } + for i := len(s.oooMmappedChunks) - 1; i >= 0; i-- { + c := s.oooMmappedChunks[i] + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (lastMmapRef == 0 || lastMmapRef.GreaterThanOrEqualTo(c.ref)) { + ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) + addChunk(c.minTime, c.maxTime, ref) + } + } + + // There is nothing to do if we did not collect any chunk + if len(tmpChks) == 0 { + return nil + } + + // Next we want to sort all the collected chunks by min time so we can find + // those that overlap. + sort.Sort(metaByMinTimeAndMinRef(tmpChks)) + + // Next we want to iterate the sorted collected chunks and only return the + // chunks Meta the first chunk that overlaps with others. + // Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) + // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to + // to return chunk Metas for chunk 5 and chunk 6 + *chks = append(*chks, tmpChks[0]) + maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk" + for _, c := range tmpChks[1:] { + if c.MinTime > maxTime { + *chks = append(*chks, c) + maxTime = c.MaxTime + } else if c.MaxTime > maxTime { + maxTime = c.MaxTime + (*chks)[len(*chks)-1].MaxTime = c.MaxTime + } + } + + return nil +} + +// LabelValues needs to be overridden from the headIndexReader implementation due +// to the check that happens at the beginning where we make sure that the query +// interval overlaps with the head minooot and maxooot. 
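The final loop of series() above is an interval merge: after sorting by MinTime, any chunk whose range touches the running maximum is folded into the previous Meta, and everything else starts a new one. A self-contained sketch of that step with simplified types (chunks.Meta reduced to a min/max pair, Ref tie-breaking omitted):

    package main

    import (
        "fmt"
        "sort"
    )

    type meta struct{ minT, maxT int64 }

    // mergeOverlapping collapses chunks that overlap in time into one entry,
    // mirroring the loop at the end of series().
    func mergeOverlapping(chks []meta) []meta {
        if len(chks) == 0 {
            return nil
        }
        sort.Slice(chks, func(i, j int) bool { return chks[i].minT < chks[j].minT })

        out := []meta{chks[0]}
        maxT := chks[0].maxT
        for _, c := range chks[1:] {
            if c.minT > maxT {
                out = append(out, c) // no overlap: start a new entry
                maxT = c.maxT
            } else if c.maxT > maxT {
                maxT = c.maxT // overlap: extend the previous entry
                out[len(out)-1].maxT = c.maxT
            }
        }
        return out
    }

    func main() {
        // The example from the comment: 5:(100,200) 6:(500,600) 7:(150,250) 8:(550,650).
        fmt.Println(mergeOverlapping([]meta{{100, 200}, {500, 600}, {150, 250}, {550, 650}}))
        // Output: [{100 250} {500 650}]
    }
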
+func (oh *OOOHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() { + return []string{}, nil + } + + if len(matchers) == 0 { + return oh.head.postings.LabelValues(name), nil + } + + return labelValuesWithMatchers(oh, name, matchers...) +} + +type chunkMetaAndChunkDiskMapperRef struct { + meta chunks.Meta + ref chunks.ChunkDiskMapperRef + origMinT int64 + origMaxT int64 +} + +type byMinTimeAndMinRef []chunkMetaAndChunkDiskMapperRef + +func (b byMinTimeAndMinRef) Len() int { return len(b) } +func (b byMinTimeAndMinRef) Less(i, j int) bool { + if b[i].meta.MinTime == b[j].meta.MinTime { + return b[i].meta.Ref < b[j].meta.Ref + } + return b[i].meta.MinTime < b[j].meta.MinTime +} + +func (b byMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +type metaByMinTimeAndMinRef []chunks.Meta + +func (b metaByMinTimeAndMinRef) Len() int { return len(b) } +func (b metaByMinTimeAndMinRef) Less(i, j int) bool { + if b[i].MinTime == b[j].MinTime { + return b[i].Ref < b[j].Ref + } + return b[i].MinTime < b[j].MinTime +} + +func (b metaByMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { + switch len(values) { + case 0: + return index.EmptyPostings(), nil + case 1: + return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings + default: + // TODO(ganesh) We want to only return postings for out of order series. + res := make([]index.Postings, 0, len(values)) + for _, value := range values { + res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings + } + return index.Merge(res...), nil + } +} + +type OOOHeadChunkReader struct { + head *Head + mint, maxt int64 +} + +func NewOOOHeadChunkReader(head *Head, mint, maxt int64) *OOOHeadChunkReader { + return &OOOHeadChunkReader{ + head: head, + mint: mint, + maxt: maxt, + } +} + +func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { + sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack() + + s := cr.head.series.getByID(sid) + // This means that the series has been garbage collected. + if s == nil { + return nil, storage.ErrNotFound + } + + s.Lock() + c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) + s.Unlock() + if err != nil { + return nil, err + } + + // This means that the query range did not overlap with the requested chunk. + if len(c.chunks) == 0 { + return nil, storage.ErrNotFound + } + + return c, nil +} + +func (cr OOOHeadChunkReader) Close() error { + return nil +} + +type OOOCompactionHead struct { + oooIR *OOOHeadIndexReader + lastMmapRef chunks.ChunkDiskMapperRef + lastWBLFile int + postings []storage.SeriesRef + chunkRange int64 + mint, maxt int64 // Among all the compactable chunks. +} + +// NewOOOCompactionHead does the following: +// 1. M-maps all the in-memory ooo chunks. +// 2. Compute the expected block ranges while iterating through all ooo series and store it. +// 3. Store the list of postings having ooo series. +// 4. Cuts a new WBL file for the OOO WBL. +// All the above together have a bit of CPU and memory overhead, and can have a bit of impact +// on the sample append latency. So call NewOOOCompactionHead only right before compaction. 
+func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) { + newWBLFile, err := head.wbl.NextSegmentSync() + if err != nil { + return nil, err + } + + ch := &OOOCompactionHead{ + chunkRange: head.chunkRange.Load(), + mint: math.MaxInt64, + maxt: math.MinInt64, + lastWBLFile: newWBLFile, + } + + ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64) + n, v := index.AllPostingsKey() + + // TODO: verify this gets only ooo samples. + p, err := ch.oooIR.Postings(n, v) + if err != nil { + return nil, err + } + p = ch.oooIR.SortedPostings(p) + + var lastSeq, lastOff int + for p.Next() { + seriesRef := p.At() + ms := head.series.getByID(chunks.HeadSeriesRef(seriesRef)) + if ms == nil { + continue + } + + // M-map the in-memory chunk and keep track of the last one. + // Also build the block ranges -> series map. + // TODO: consider having a lock specifically for ooo data. + ms.Lock() + + mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) + if mmapRef == 0 && len(ms.oooMmappedChunks) > 0 { + // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists. + mmapRef = ms.oooMmappedChunks[len(ms.oooMmappedChunks)-1].ref + } + seq, off := mmapRef.Unpack() + if seq > lastSeq || (seq == lastSeq && off > lastOff) { + ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off + } + if len(ms.oooMmappedChunks) > 0 { + ch.postings = append(ch.postings, seriesRef) + for _, c := range ms.oooMmappedChunks { + if c.minTime < ch.mint { + ch.mint = c.minTime + } + if c.maxTime > ch.maxt { + ch.maxt = c.maxTime + } + } + } + ms.Unlock() + } + + return ch, nil +} + +func (ch *OOOCompactionHead) Index() (IndexReader, error) { + return NewOOOCompactionHeadIndexReader(ch), nil +} + +func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { + return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt), nil +} + +func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { + return tombstones.NewMemTombstones(), nil +} + +func (ch *OOOCompactionHead) Meta() BlockMeta { + var id [16]byte + copy(id[:], "copy(id[:], \"ooo_compact_head\")") + return BlockMeta{ + MinTime: ch.mint, + MaxTime: ch.maxt, + ULID: id, + Stats: BlockStats{ + NumSeries: uint64(len(ch.postings)), + }, + } +} + +// CloneForTimeRange clones the OOOCompactionHead such that the IndexReader and ChunkReader +// obtained from this only looks at the m-map chunks within the given time ranges while not looking +// beyond the ch.lastMmapRef. +// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead. 
+func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead { + return &OOOCompactionHead{ + oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt), + lastMmapRef: ch.lastMmapRef, + postings: ch.postings, + chunkRange: ch.chunkRange, + mint: ch.mint, + maxt: ch.maxt, + } +} + +func (ch *OOOCompactionHead) Size() int64 { return 0 } +func (ch *OOOCompactionHead) MinTime() int64 { return ch.mint } +func (ch *OOOCompactionHead) MaxTime() int64 { return ch.maxt } +func (ch *OOOCompactionHead) ChunkRange() int64 { return ch.chunkRange } +func (ch *OOOCompactionHead) LastMmapRef() chunks.ChunkDiskMapperRef { return ch.lastMmapRef } +func (ch *OOOCompactionHead) LastWBLFile() int { return ch.lastWBLFile } + +type OOOCompactionHeadIndexReader struct { + ch *OOOCompactionHead +} + +func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader { + return &OOOCompactionHeadIndexReader{ch: ch} +} + +func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter { + return ir.ch.oooIR.Symbols() +} + +func (ir *OOOCompactionHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { + n, v := index.AllPostingsKey() + if name != n || len(values) != 1 || values[0] != v { + return nil, errors.New("only AllPostingsKey is supported") + } + return index.NewListPostings(ir.ch.postings), nil +} + +func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings { + // This will already be sorted from the Postings() call above. + return p +} + +func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { + return ir.ch.oooIR.series(ref, lset, chks, ir.ch.lastMmapRef) +} + +func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { + return "", errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) Close() error { + return ir.ch.oooIR.Close() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 522adb87cdd..5141a2c1d26 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -569,7 +569,7 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { p.i++ p.currChkMeta = p.chks[p.i] - p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta.Ref) + p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta) if p.err != nil { p.err = errors.Wrapf(p.err, "cannot populate chunk %d", p.currChkMeta.Ref) return false @@ -898,7 +898,7 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { 
+func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { return cr.emptyChunk, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 12a4047396f..162414a3ce6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -42,6 +43,90 @@ const ( Tombstones Type = 3 // Exemplars is used to match WAL records of type Exemplars. Exemplars Type = 4 + // MmapMarkers is used to match OOO WBL records of type MmapMarkers. + MmapMarkers Type = 5 + // Metadata is used to match WAL records of type Metadata. + Metadata Type = 6 +) + +func (rt Type) String() string { + switch rt { + case Series: + return "series" + case Samples: + return "samples" + case Exemplars: + return "exemplars" + case Tombstones: + return "tombstones" + case MmapMarkers: + return "mmapmarkers" + case Metadata: + return "metadata" + default: + return "unknown" + } +} + +// MetricType represents the type of a series. +type MetricType uint8 + +const ( + UnknownMT MetricType = 0 + Counter MetricType = 1 + Gauge MetricType = 2 + Histogram MetricType = 3 + GaugeHistogram MetricType = 4 + Summary MetricType = 5 + Info MetricType = 6 + Stateset MetricType = 7 +) + +func GetMetricType(t textparse.MetricType) uint8 { + switch t { + case textparse.MetricTypeCounter: + return uint8(Counter) + case textparse.MetricTypeGauge: + return uint8(Gauge) + case textparse.MetricTypeHistogram: + return uint8(Histogram) + case textparse.MetricTypeGaugeHistogram: + return uint8(GaugeHistogram) + case textparse.MetricTypeSummary: + return uint8(Summary) + case textparse.MetricTypeInfo: + return uint8(Info) + case textparse.MetricTypeStateset: + return uint8(Stateset) + default: + return uint8(UnknownMT) + } +} + +func ToTextparseMetricType(m uint8) textparse.MetricType { + switch m { + case uint8(Counter): + return textparse.MetricTypeCounter + case uint8(Gauge): + return textparse.MetricTypeGauge + case uint8(Histogram): + return textparse.MetricTypeHistogram + case uint8(GaugeHistogram): + return textparse.MetricTypeGaugeHistogram + case uint8(Summary): + return textparse.MetricTypeSummary + case uint8(Info): + return textparse.MetricTypeInfo + case uint8(Stateset): + return textparse.MetricTypeStateset + default: + return textparse.MetricTypeUnknown + } +} + +const ( + unitMetaName = "UNIT" + helpMetaName = "HELP" ) // ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. @@ -60,6 +145,14 @@ type RefSample struct { V float64 } +// RefMetadata is the metadata associated with a series ID. +type RefMetadata struct { + Ref chunks.HeadSeriesRef + Type uint8 + Unit string + Help string +} + // RefExemplar is an exemplar with it's labels, timestamp, value the exemplar was collected/observed with, and a reference to a series. type RefExemplar struct { Ref chunks.HeadSeriesRef @@ -68,7 +161,13 @@ type RefExemplar struct { Labels labels.Labels } -// Decoder decodes series, sample, and tombstone records. +// RefMmapMarker marks that the all the samples of the given series until now have been m-mapped to disk. 
+type RefMmapMarker struct { + Ref chunks.HeadSeriesRef + MmapRef chunks.ChunkDiskMapperRef +} + +// Decoder decodes series, sample, metadata and tombstone records. // The zero value is ready to use. type Decoder struct{} @@ -79,7 +178,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata: return t } return Unknown @@ -94,14 +193,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { } for len(dec.B) > 0 && dec.Err() == nil { ref := storage.SeriesRef(dec.Be64()) - - lset := make(labels.Labels, dec.Uvarint()) - - for i := range lset { - lset[i].Name = dec.UvarintStr() - lset[i].Value = dec.UvarintStr() - } - sort.Sort(lset) + lset := d.DecodeLabels(&dec) series = append(series, RefSeries{ Ref: chunks.HeadSeriesRef(ref), @@ -117,6 +209,61 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { return series, nil } +// Metadata appends metadata in rec to the given slice. +func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != Metadata { + return nil, errors.New("invalid record type") + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := dec.Uvarint64() + typ := dec.Byte() + numFields := dec.Uvarint() + + // We're currently aware of two more metadata fields other than TYPE; that is UNIT and HELP. + // We can skip the rest of the fields (if we encounter any), but we must decode them anyway + // so we can correctly align with the start with the next metadata record. + var unit, help string + for i := 0; i < numFields; i++ { + fieldName := dec.UvarintStr() + fieldValue := dec.UvarintStr() + switch fieldName { + case unitMetaName: + unit = fieldValue + case helpMetaName: + help = fieldValue + } + } + + metadata = append(metadata, RefMetadata{ + Ref: chunks.HeadSeriesRef(ref), + Type: typ, + Unit: unit, + Help: help, + }) + } + if dec.Err() != nil { + return nil, dec.Err() + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return metadata, nil +} + +// DecodeLabels decodes one set of labels from buf. +func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels { + lset := make(labels.Labels, dec.Uvarint()) + + for i := range lset { + lset[i].Name = dec.UvarintStr() + lset[i].Value = dec.UvarintStr() + } + sort.Sort(lset) + return lset +} + // Samples appends samples in rec to the given slice. 
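The new Metadata record type round-trips through the Encoder and Decoder defined in this file. A short usage sketch, assuming the vendored package paths; the series ref and field values are illustrative:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/textparse"
        "github.com/prometheus/prometheus/tsdb/chunks"
        "github.com/prometheus/prometheus/tsdb/record"
    )

    func main() {
        var enc record.Encoder
        var dec record.Decoder

        // One metadata entry: series ref, metric type, then the UNIT and HELP fields.
        rec := enc.Metadata([]record.RefMetadata{{
            Ref:  chunks.HeadSeriesRef(42),
            Type: record.GetMetricType(textparse.MetricTypeCounter),
            Unit: "seconds",
            Help: "Time spent serving requests.",
        }}, nil)

        fmt.Println(dec.Type(rec)) // metadata

        // Decoding skips unknown future fields but still consumes them,
        // so it stays aligned with the next entry.
        entries, err := dec.Metadata(rec, nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", entries[0])
    }
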
func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) { dec := encoding.Decbuf{B: rec} @@ -198,13 +345,7 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp dref := dec.Varint64() dtime := dec.Varint64() val := dec.Be64() - - lset := make(labels.Labels, dec.Uvarint()) - for i := range lset { - lset[i].Name = dec.UvarintStr() - lset[i].Value = dec.UvarintStr() - } - sort.Sort(lset) + lset := d.DecodeLabels(dec) exemplars = append(exemplars, RefExemplar{ Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)), @@ -223,6 +364,34 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp return exemplars, nil } +func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) { + dec := encoding.Decbuf{B: rec} + t := Type(dec.Byte()) + if t != MmapMarkers { + return nil, errors.New("invalid record type") + } + + if dec.Len() == 0 { + return markers, nil + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := chunks.HeadSeriesRef(dec.Be64()) + mmapRef := chunks.ChunkDiskMapperRef(dec.Be64()) + markers = append(markers, RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) + } + + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d mmap markers", len(markers)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return markers, nil +} + // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. type Encoder struct{} @@ -234,16 +403,41 @@ func (e *Encoder) Series(series []RefSeries, b []byte) []byte { for _, s := range series { buf.PutBE64(uint64(s.Ref)) - buf.PutUvarint(len(s.Labels)) + EncodeLabels(&buf, s.Labels) + } + return buf.Get() +} - for _, l := range s.Labels { - buf.PutUvarintStr(l.Name) - buf.PutUvarintStr(l.Value) - } +// Metadata appends the encoded metadata to b and returns the resulting slice. +func (e *Encoder) Metadata(metadata []RefMetadata, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(Metadata)) + + for _, m := range metadata { + buf.PutUvarint64(uint64(m.Ref)) + + buf.PutByte(m.Type) + + buf.PutUvarint(2) // num_fields: We currently have two more metadata fields, UNIT and HELP. + buf.PutUvarintStr(unitMetaName) + buf.PutUvarintStr(m.Unit) + buf.PutUvarintStr(helpMetaName) + buf.PutUvarintStr(m.Help) } + return buf.Get() } +// EncodeLabels encodes the contents of labels into buf. +func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) { + buf.PutUvarint(len(lbls)) + + for _, l := range lbls { + buf.PutUvarintStr(l.Name) + buf.PutUvarintStr(l.Value) + } +} + // Samples appends the encoded samples to b and returns the resulting slice. 
func (e *Encoder) Samples(samples []RefSample, b []byte) []byte { buf := encoding.Encbuf{B: b} @@ -308,11 +502,18 @@ func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encodi buf.PutVarint64(int64(ex.Ref) - int64(first.Ref)) buf.PutVarint64(ex.T - first.T) buf.PutBE64(math.Float64bits(ex.V)) + EncodeLabels(buf, ex.Labels) + } +} - buf.PutUvarint(len(ex.Labels)) - for _, l := range ex.Labels { - buf.PutUvarintStr(l.Name) - buf.PutUvarintStr(l.Value) - } +func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(MmapMarkers)) + + for _, s := range markers { + buf.PutBE64(uint64(s.Ref)) + buf.PutBE64(uint64(s.MmapRef)) } + + return buf.Get() } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go index 73244632472..a1dcd9a5f3f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go @@ -44,6 +44,8 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l } }() + sampleCount := 0 + const commitAfter = 10000 ctx := context.Background() app := w.Appender(ctx) @@ -57,10 +59,19 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l if err != nil { return "", err } + sampleCount++ } if it.Err() != nil { return "", it.Err() } + // Commit and make a new appender periodically, to avoid building up data in memory. + if sampleCount > commitAfter { + if err = app.Commit(); err != nil { + return "", err + } + app = w.Appender(ctx) + sampleCount = 0 + } } if err = app.Commit(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go deleted file mode 100644 index 3e136bb1d53..00000000000 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/buffer.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tsdbutil - -import ( - "math" - - "github.com/prometheus/prometheus/tsdb/chunkenc" -) - -// BufferedSeriesIterator wraps an iterator with a look-back buffer. -type BufferedSeriesIterator struct { - it chunkenc.Iterator - buf *sampleRing - - lastTime int64 -} - -// NewBuffer returns a new iterator that buffers the values within the time range -// of the current element and the duration of delta before. -func NewBuffer(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator { - return &BufferedSeriesIterator{ - it: it, - buf: newSampleRing(delta, 16), - lastTime: math.MinInt64, - } -} - -// PeekBack returns the previous element of the iterator. If there is none buffered, -// ok is false. -func (b *BufferedSeriesIterator) PeekBack() (t int64, v float64, ok bool) { - return b.buf.last() -} - -// Buffer returns an iterator over the buffered data. 
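The CreateBlock change bounds memory use by committing once more than commitAfter samples have accumulated and then opening a fresh appender, since a committed appender cannot be reused; the vendored code performs the check after finishing each series. A simplified sketch of the pattern, with the appender factory passed in so the example does not depend on BlockWriter (all names here are illustrative):

```go
func appendWithPeriodicCommit(ctx context.Context, newApp func(context.Context) storage.Appender, lbls labels.Labels, ts []int64, vs []float64) error {
	const commitAfter = 10000

	app := newApp(ctx)
	sampleCount := 0
	for i := range ts {
		if _, err := app.Append(0, lbls, ts[i], vs[i]); err != nil {
			return err
		}
		sampleCount++

		// Commit and make a new appender periodically, to avoid building up data in memory.
		if sampleCount > commitAfter {
			if err := app.Commit(); err != nil {
				return err
			}
			app = newApp(ctx)
			sampleCount = 0
		}
	}
	return app.Commit()
}
```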
-func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator { - return b.buf.iterator() -} - -// Seek advances the iterator to the element at time t or greater. -func (b *BufferedSeriesIterator) Seek(t int64) bool { - t0 := t - b.buf.delta - - // If the delta would cause us to seek backwards, preserve the buffer - // and just continue regular advancement while filling the buffer on the way. - if t0 > b.lastTime { - b.buf.reset() - - ok := b.it.Seek(t0) - if !ok { - return false - } - b.lastTime, _ = b.At() - } - - if b.lastTime >= t { - return true - } - for b.Next() { - if b.lastTime >= t { - return true - } - } - - return false -} - -// Next advances the iterator to the next element. -func (b *BufferedSeriesIterator) Next() bool { - // Add current element to buffer before advancing. - b.buf.add(b.it.At()) - - ok := b.it.Next() - if ok { - b.lastTime, _ = b.At() - } - return ok -} - -// At returns the current element of the iterator. -func (b *BufferedSeriesIterator) At() (int64, float64) { - return b.it.At() -} - -// Err returns the last encountered error. -func (b *BufferedSeriesIterator) Err() error { - return b.it.Err() -} - -type sample struct { - t int64 - v float64 -} - -func (s sample) T() int64 { - return s.t -} - -func (s sample) V() float64 { - return s.v -} - -type sampleRing struct { - delta int64 - - buf []sample // lookback buffer - i int // position of most recent element in ring buffer - f int // position of first element in ring buffer - l int // number of elements in buffer -} - -func newSampleRing(delta int64, sz int) *sampleRing { - r := &sampleRing{delta: delta, buf: make([]sample, sz)} - r.reset() - - return r -} - -func (r *sampleRing) reset() { - r.l = 0 - r.i = -1 - r.f = 0 -} - -func (r *sampleRing) iterator() chunkenc.Iterator { - return &sampleRingIterator{r: r, i: -1} -} - -type sampleRingIterator struct { - r *sampleRing - i int -} - -func (it *sampleRingIterator) Next() bool { - it.i++ - return it.i < it.r.l -} - -func (it *sampleRingIterator) Seek(int64) bool { - return false -} - -func (it *sampleRingIterator) Err() error { - return nil -} - -func (it *sampleRingIterator) At() (int64, float64) { - return it.r.at(it.i) -} - -func (r *sampleRing) at(i int) (int64, float64) { - j := (r.f + i) % len(r.buf) - s := r.buf[j] - return s.t, s.v -} - -// add adds a sample to the ring buffer and frees all samples that fall -// out of the delta range. -func (r *sampleRing) add(t int64, v float64) { - l := len(r.buf) - // Grow the ring buffer if it fits no more elements. - if l == r.l { - buf := make([]sample, 2*l) - copy(buf[l+r.f:], r.buf[r.f:]) - copy(buf, r.buf[:r.f]) - - r.buf = buf - r.i = r.f - r.f += l - } else { - r.i++ - if r.i >= l { - r.i -= l - } - } - - r.buf[r.i] = sample{t: t, v: v} - r.l++ - - // Free head of the buffer of samples that just fell out of the range. - for r.buf[r.f].t < t-r.delta { - r.f++ - if r.f >= l { - r.f -= l - } - r.l-- - } -} - -// last returns the most recent element added to the ring. 
-func (r *sampleRing) last() (int64, float64, bool) { - if r.l == 0 { - return 0, 0, false - } - s := r.buf[r.i] - return s.t, s.v, true -} - -func (r *sampleRing) samples() []sample { - res := make([]sample, r.l) - - k := r.f + r.l - var j int - if k > len(r.buf) { - k = len(r.buf) - j = r.l - k + r.f - } - - n := copy(res, r.buf[r.f:k]) - copy(res[n:], r.buf[:j]) - - return res -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go index ffe9c05e001..541d592d484 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go @@ -57,6 +57,19 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { } } +type sample struct { + t int64 + v float64 +} + +func (s sample) T() int64 { + return s.t +} + +func (s sample) V() float64 { + return s.v +} + // PopulatedChunk creates a chunk populated with samples every second starting at minTime func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { samples := make([]Sample, numSamples) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index 2b9f117aa5e..615903c6395 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -23,7 +23,6 @@ import ( "math" "os" "path/filepath" - "sort" "sync" "time" @@ -32,7 +31,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/encoding" @@ -790,12 +788,7 @@ const ( func (w *SegmentWAL) encodeSeries(buf *encoding.Encbuf, series []record.RefSeries) uint8 { for _, s := range series { buf.PutBE64(uint64(s.Ref)) - buf.PutUvarint(len(s.Labels)) - - for _, l := range s.Labels { - buf.PutUvarintStr(l.Name) - buf.PutUvarintStr(l.Value) - } + record.EncodeLabels(buf, s.Labels) } return walSeriesSimple } @@ -840,6 +833,7 @@ type walReader struct { cur int buf []byte crc32 hash.Hash32 + dec record.Decoder curType WALEntryType curFlag byte @@ -1123,14 +1117,7 @@ func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) e for len(dec.B) > 0 && dec.Err() == nil { ref := chunks.HeadSeriesRef(dec.Be64()) - - lset := make(labels.Labels, dec.Uvarint()) - - for i := range lset { - lset[i].Name = dec.UvarintStr() - lset[i].Value = dec.UvarintStr() - } - sort.Sort(lset) + lset := r.dec.DecodeLabels(&dec) *res = append(*res, record.RefSeries{ Ref: ref, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go index af4bd940c75..f8f9f387d59 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go @@ -41,10 +41,12 @@ type CheckpointStats struct { DroppedSamples int DroppedTombstones int DroppedExemplars int + DroppedMetadata int TotalSeries int // Processed series including dropped ones. TotalSamples int // Processed samples including dropped ones. TotalTombstones int // Processed tombstones including dropped ones. TotalExemplars int // Processed exemplars including dropped ones. + TotalMetadata int // Processed metadata including dropped ones. } // LastCheckpoint returns the directory name and index of the most recent checkpoint. 
@@ -84,7 +86,8 @@ const checkpointPrefix = "checkpoint." // Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL. // It includes the most recent checkpoint if it exists. -// All series not satisfying keep and samples/tombstones/exemplars below mint are dropped. +// All series not satisfying keep, samples/tombstones/exemplars below mint and +// metadata that are not the latest are dropped. // // The checkpoint is stored in a directory named checkpoint.N in the same // segmented format as the original WAL itself. @@ -149,13 +152,16 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea samples []record.RefSample tstones []tombstones.Stone exemplars []record.RefExemplar + metadata []record.RefMetadata dec record.Decoder enc record.Encoder buf []byte recs [][]byte + + latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata) ) for r.Next() { - series, samples, tstones, exemplars = series[:0], samples[:0], tstones[:0], exemplars[:0] + series, samples, tstones, exemplars, metadata = series[:0], samples[:0], tstones[:0], exemplars[:0], metadata[:0] // We don't reset the buffer since we batch up multiple records // before writing them to the checkpoint. @@ -238,6 +244,23 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea } stats.TotalExemplars += len(exemplars) stats.DroppedExemplars += len(exemplars) - len(repl) + case record.Metadata: + metadata, err := dec.Metadata(rec, metadata) + if err != nil { + return nil, errors.Wrap(err, "decode metadata") + } + // Only keep reference to the latest found metadata for each refID. + repl := 0 + for _, m := range metadata { + if keep(m.Ref) { + if _, ok := latestMetadataMap[m.Ref]; !ok { + repl++ + } + latestMetadataMap[m.Ref] = m + } + } + stats.TotalMetadata += len(metadata) + stats.DroppedMetadata += len(metadata) - repl default: // Unknown record type, probably from a future Prometheus version. continue @@ -265,6 +288,18 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea if err := cp.Log(recs...); err != nil { return nil, errors.Wrap(err, "flush records") } + + // Flush latest metadata records for each series. 
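Unlike series, samples, and tombstones, metadata records are not copied into the checkpoint verbatim: only the most recently seen metadata per kept series ref survives, and everything is flushed as a single record at the end (shown in the next hunk). The bookkeeping reduces to a map overwrite; keep and the decoded metadata slice come from the surrounding Checkpoint function:

```go
latestMetadataMap := make(map[chunks.HeadSeriesRef]record.RefMetadata)

for _, m := range metadata { // entries of one decoded WAL record, in order
	if !keep(m.Ref) {
		continue // the series itself is dropped, so its metadata is dropped too
	}
	latestMetadataMap[m.Ref] = m // a later record for the same ref wins
}
```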
+ if len(latestMetadataMap) > 0 { + latestMetadata := make([]record.RefMetadata, 0, len(latestMetadataMap)) + for _, m := range latestMetadataMap { + latestMetadata = append(latestMetadata, m) + } + if err := cp.Log(enc.Metadata(latestMetadata, buf[:0])); err != nil { + return nil, errors.Wrap(err, "flush metadata records") + } + } + if err := cp.Close(); err != nil { return nil, errors.Wrap(err, "close checkpoint") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go index ace6a99566f..191b09ed992 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go @@ -40,6 +40,7 @@ const ( DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB pageSize = 32 * 1024 // 32KB recordHeaderSize = 7 + WblDirName = "wbl" ) // The table gets initialized with sync.Once but may still cause a race @@ -204,32 +205,32 @@ func newWALMetrics(r prometheus.Registerer) *walMetrics { m := &walMetrics{} m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "prometheus_tsdb_wal_fsync_duration_seconds", + Name: "fsync_duration_seconds", Help: "Duration of WAL fsync.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_page_flushes_total", + Name: "page_flushes_total", Help: "Total number of page flushes.", }) m.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_completed_pages_total", + Name: "completed_pages_total", Help: "Total number of completed pages.", }) m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_truncations_failed_total", + Name: "truncations_failed_total", Help: "Total number of WAL truncations that failed.", }) m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_truncations_total", + Name: "truncations_total", Help: "Total number of WAL truncations attempted.", }) m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "prometheus_tsdb_wal_segment_current", + Name: "segment_current", Help: "WAL segment index that TSDB is currently writing to.", }) m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_writes_failed_total", + Name: "writes_failed_total", Help: "Total number of WAL writes that failed.", }) @@ -274,7 +275,11 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi stopc: make(chan chan struct{}), compress: compress, } - w.metrics = newWALMetrics(reg) + prefix := "prometheus_tsdb_wal_" + if filepath.Base(dir) == WblDirName { + prefix = "prometheus_tsdb_out_of_order_wal_" + } + w.metrics = newWALMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg)) _, last, err := Segments(w.Dir()) if err != nil { @@ -459,36 +464,46 @@ func SegmentName(dir string, i int) string { return filepath.Join(dir, fmt.Sprintf("%08d", i)) } -// NextSegment creates the next segment and closes the previous one. -func (w *WAL) NextSegment() error { +// NextSegment creates the next segment and closes the previous one asynchronously. +// It returns the file number of the new file. +func (w *WAL) NextSegment() (int, error) { + w.mtx.Lock() + defer w.mtx.Unlock() + return w.nextSegment(true) +} + +// NextSegmentSync creates the next segment and closes the previous one in sync. +// It returns the file number of the new file. 
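The WAL metric names above lose their hard-coded prometheus_tsdb_wal_ prefix because the prefix is now supplied by wrapping the registerer, chosen from the WAL directory name: a regular WAL keeps its old metric names, while a wbl (out-of-order WAL) directory gets prometheus_tsdb_out_of_order_wal_*. A sketch of the effect, where baseReg stands in for the registerer passed to NewSize:

```go
prefix := "prometheus_tsdb_wal_"
if filepath.Base(dir) == wal.WblDirName { // WblDirName == "wbl"
	prefix = "prometheus_tsdb_out_of_order_wal_"
}
reg := prometheus.WrapRegistererWithPrefix(prefix, baseReg)

fsyncDuration := prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "fsync_duration_seconds", // exposed as <prefix>fsync_duration_seconds
	Help: "Duration of WAL fsync.",
})
reg.MustRegister(fsyncDuration)
```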
+func (w *WAL) NextSegmentSync() (int, error) { w.mtx.Lock() defer w.mtx.Unlock() - return w.nextSegment() + return w.nextSegment(false) } // nextSegment creates the next segment and closes the previous one. -func (w *WAL) nextSegment() error { +// It returns the file number of the new file. +func (w *WAL) nextSegment(async bool) (int, error) { if w.closed { - return errors.New("wal is closed") + return 0, errors.New("wal is closed") } // Only flush the current page if it actually holds data. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { - return err + return 0, err } } next, err := CreateSegment(w.Dir(), w.segment.Index()+1) if err != nil { - return errors.Wrap(err, "create new segment file") + return 0, errors.Wrap(err, "create new segment file") } prev := w.segment if err := w.setSegment(next); err != nil { - return err + return 0, err } // Don't block further writes by fsyncing the last segment. - w.actorc <- func() { + f := func() { if err := w.fsync(prev); err != nil { level.Error(w.logger).Log("msg", "sync previous segment", "err", err) } @@ -496,7 +511,12 @@ func (w *WAL) nextSegment() error { level.Error(w.logger).Log("msg", "close previous segment", "err", err) } } - return nil + if async { + w.actorc <- f + } else { + f() + } + return next.Index(), nil } func (w *WAL) setSegment(segment *Segment) error { @@ -638,7 +658,7 @@ func (w *WAL) log(rec []byte, final bool) error { left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment. if len(rec) > left { - if err := w.nextSegment(); err != nil { + if _, err := w.nextSegment(true); err != nil { return err } } @@ -745,6 +765,13 @@ func (w *WAL) fsync(f *Segment) error { return err } +// Sync forces a file sync on the current wal segment. This function is meant +// to be used only on tests due to different behaviour on Operating Systems +// like windows and linux +func (w *WAL) Sync() error { + return w.fsync(w.segment) +} + // Close flushes all writes and closes active segment. func (w *WAL) Close() (err error) { w.mtx.Lock() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go index 7b026b4ea8f..5b949fc00ff 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go @@ -481,7 +481,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { ) for r.Next() && !isClosed(w.quit) { rec := r.Record() - w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc() + w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc() switch dec.Type(rec) { case record.Series: @@ -554,7 +554,7 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error ) for r.Next() && !isClosed(w.quit) { rec := r.Record() - w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc() + w.recordsReadMetric.WithLabelValues(dec.Type(rec).String()).Inc() switch dec.Type(rec) { case record.Series: @@ -583,19 +583,6 @@ func (w *Watcher) SetStartTime(t time.Time) { w.startTimestamp = timestamp.FromTime(t) } -func recordType(rt record.Type) string { - switch rt { - case record.Series: - return "series" - case record.Samples: - return "samples" - case record.Tombstones: - return "tombstones" - default: - return "unknown" - } -} - type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) error // Read all the series records from a Checkpoint directory. 
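For callers, segment rotation changes in two ways: both variants now return the index of the newly created segment, and NextSegmentSync fsyncs and closes the previous segment before returning instead of handing that work to the actor goroutine. A usage sketch (w is a *wal.WAL; the logging line is illustrative):

```go
idx, err := w.NextSegment() // previous segment is fsynced and closed asynchronously
if err != nil {
	return err
}
level.Info(logger).Log("msg", "rotated WAL segment", "segment", idx)

if _, err := w.NextSegmentSync(); err != nil { // synchronous variant
	return err
}

if err := w.Sync(); err != nil { // new helper, intended for tests: fsync the current segment
	return err
}
```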
diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go index 6b5751b016c..2afa828547f 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/file.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go @@ -14,11 +14,11 @@ package logging import ( + "fmt" "os" "time" "github.com/go-kit/log" - "github.com/pkg/errors" ) var timestampFormat = log.TimestampFormat( @@ -40,7 +40,7 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { - return nil, errors.Wrap(err, "can't create json logger") + return nil, fmt.Errorf("can't create json logger: %w", err) } return &JSONFileLogger{ diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/quote.go b/vendor/github.com/prometheus/prometheus/util/strutil/quote.go index 86d477fc1f6..95dcb6f694f 100644 --- a/vendor/github.com/prometheus/prometheus/util/strutil/quote.go +++ b/vendor/github.com/prometheus/prometheus/util/strutil/quote.go @@ -114,10 +114,10 @@ func Unquote(s string) (t string, err error) { // or character literal represented by the string s. // It returns four values: // -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. +// 1. value, the decoded Unicode code point or byte value; +// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. // // The second argument, quote, specifies the type of literal being parsed // and therefore which escaped quote character is permitted. diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 0ad24abdbfb..9ceaa8c3f2d 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -323,6 +323,9 @@ func (api *API) Register(r *route.Router) { r.Get("/query_exemplars", wrapAgent(api.queryExemplars)) r.Post("/query_exemplars", wrapAgent(api.queryExemplars)) + r.Get("/format_query", wrapAgent(api.formatQuery)) + r.Post("/format_query", wrapAgent(api.formatQuery)) + r.Get("/labels", wrapAgent(api.labelNames)) r.Post("/labels", wrapAgent(api.labelNames)) r.Get("/label/:name/values", wrapAgent(api.labelValues)) @@ -428,6 +431,15 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { }, nil, res.Warnings, qry.Close} } +func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { + expr, err := parser.ParseExpr(r.FormValue("query")) + if err != nil { + return invalidParamError(err, "query") + } + + return apiFuncResult{expr.Pretty(0), nil, nil, nil} +} + func extractQueryOpts(r *http.Request) *promql.QueryOpts { return &promql.QueryOpts{ EnablePerStepStats: r.FormValue("stats") == "all", @@ -756,15 +768,21 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { End: timestamp.FromTime(end), Func: "series", // There is no series function, this token is used for lookups that don't need samples. 
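The new GET/POST /api/v1/format_query endpoint is a thin wrapper: it parses the query form value and returns the expression pretty-printed by the PromQL parser. Outside the API layer the same result can be obtained directly; the query text and error handling below are illustrative:

```go
expr, err := parser.ParseExpr(`sum by (job) (rate(http_requests_total[5m]))`)
if err != nil {
	log.Fatalf("invalid query: %v", err)
}
fmt.Println(expr.Pretty(0)) // the query re-indented across multiple lines
```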
} + var set storage.SeriesSet - var sets []storage.SeriesSet - for _, mset := range matcherSets { - // We need to sort this select results to merge (deduplicate) the series sets later. - s := q.Select(true, hints, mset...) - sets = append(sets, s) + if len(matcherSets) > 1 { + var sets []storage.SeriesSet + for _, mset := range matcherSets { + // We need to sort this select results to merge (deduplicate) the series sets later. + s := q.Select(true, hints, mset...) + sets = append(sets, s) + } + set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) + } else { + // At this point at least one match exists. + set = q.Select(false, hints, matcherSets[0]...) } - set := storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) metrics := []labels.Labels{} for set.Next() { metrics = append(metrics, set.At().Labels()) @@ -1652,11 +1670,12 @@ func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { } // marshalExemplarJSON writes. -// { -// labels: , -// value: "", -// timestamp: -// } +// +// { +// labels: , +// value: "", +// timestamp: +// } func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { p := *((*exemplar.Exemplar)(ptr)) stream.WriteObjectStart() diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index d1a3726e09d..d44ef3382c7 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -14,6 +14,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Added - [#15](https://github.com/thanos-io/objstore/pull/15) Add Oracle Cloud Infrastructure Object Storage Bucket support. +- [#25](https://github.com/thanos-io/objstore/pull/25) S3: Support specifying S3 storage class. ### Changed diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index c1ed5cf6416..3ce8af950cc 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -96,6 +96,9 @@ const ( // NOTE: we're using a context value only because it's a very specific S3 option. If SSE will // be available to wider set of backends we should probably add a variadic option to Get() and Upload(). sseConfigKey = ctxKey(0) + + // Storage class header. + amzStorageClass = "X-Amz-Storage-Class" ) var DefaultConfig = Config{ @@ -160,6 +163,7 @@ type Bucket struct { client *minio.Client defaultSSE encrypt.ServerSide putUserMetadata map[string]string + storageClass string partSize uint64 listObjectsV1 bool } @@ -310,12 +314,23 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B return nil, errors.Errorf("Initialize s3 client list objects version: Unsupported version %q was provided. 
Supported values are v1, v2", config.ListObjectsVersion) } + var storageClass string + amzStorageClassLower := strings.ToLower(amzStorageClass) + for k, v := range config.PutUserMetadata { + if strings.ToLower(k) == amzStorageClassLower { + delete(config.PutUserMetadata, k) + storageClass = v + break + } + } + bkt := &Bucket{ logger: logger, name: config.Bucket, client: client, defaultSSE: sse, putUserMetadata: config.PutUserMetadata, + storageClass: storageClass, partSize: config.PartSize, listObjectsV1: config.ListObjectsVersion == "v1", } @@ -486,6 +501,7 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { PartSize: partSize, ServerSideEncryption: sse, UserMetadata: b.putUserMetadata, + StorageClass: b.storageClass, // 4 is what minio-go have as the default. To be certain we do micro benchmark before any changes we // ensure we pin this number to four. // TODO(bwplotka): Consider adjusting this number to GOMAXPROCS or to expose this in config if it becomes bottleneck. diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 37461d65553..7886e5d8693 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -206,10 +206,10 @@ func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket // Delete removes directory that is meant to be block directory. // NOTE: Always prefer this method for deleting blocks. -// * We have to delete block's files in the certain order (meta.json first and deletion-mark.json last) -// to ensure we don't end up with malformed partial blocks. Thanos system handles well partial blocks -// only if they don't have meta.json. If meta.json is present Thanos assumes valid block. -// * This avoids deleting empty dir (whole bucket) by mistake. +// - We have to delete block's files in the certain order (meta.json first and deletion-mark.json last) +// to ensure we don't end up with malformed partial blocks. Thanos system handles well partial blocks +// only if they don't have meta.json. If meta.json is present Thanos assumes valid block. +// - This avoids deleting empty dir (whole bucket) by mistake. 
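Returning to the S3 change a few hunks up: the storage class is not a new top-level config field. Instead, a put_user_metadata entry whose key matches X-Amz-Storage-Class (case-insensitively) is removed from the metadata map and applied as the upload storage class. A configuration sketch using the bucket client directly; the bucket name, endpoint, and component string are illustrative:

```go
cfg := s3.DefaultConfig
cfg.Bucket = "example-bucket"
cfg.Endpoint = "s3.us-east-1.amazonaws.com"
cfg.PutUserMetadata = map[string]string{
	"X-Amz-Storage-Class": "STANDARD_IA", // stripped from metadata and used as the storage class
}

bkt, err := s3.NewBucketWithConfig(logger, cfg, "example")
if err != nil {
	return err
}
_ = bkt // every Upload now sets StorageClass: STANDARD_IA
```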
func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error { metaFile := path.Join(id.String(), MetaFilename) deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/index.go b/vendor/github.com/thanos-io/thanos/pkg/block/index.go index 4bc13195d49..0696cbbcd3b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/index.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/index.go @@ -579,7 +579,7 @@ func rewrite( sort.Sort(lset) for i, c := range chks { - chks[i].Chunk, err = chunkr.Chunk(c.Ref) + chks[i].Chunk, err = chunkr.Chunk(c) if err != nil { return errors.Wrap(err, "chunk read") } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/hash.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/hash.go index 424d8868f53..0daf1bf799f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/hash.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/hash.go @@ -4,7 +4,6 @@ package metadata import ( - "crypto/sha256" "encoding/hex" "fmt" "io" @@ -12,6 +11,7 @@ import ( "path/filepath" "github.com/go-kit/log" + "github.com/minio/sha256-simd" "github.com/pkg/errors" "github.com/thanos-io/thanos/pkg/runutil" diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 55df95e55ed..c6c36bf3cc1 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -1154,7 +1154,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp } level.Info(cg.logger).Log("msg", "finished compacting blocks", "result_block", compID, "source_blocks", sourceBlockStr, - time.Since(groupCompactionBegin), "duration_ms", time.Since(groupCompactionBegin).Milliseconds()) + "duration", time.Since(groupCompactionBegin), "duration_ms", time.Since(groupCompactionBegin).Milliseconds()) return true, compID, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 639d54c7cd1..7731eab877d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -130,7 +130,7 @@ func Downsample( // While #183 exists, we sanitize the chunks we retrieved from the block // before retrieving their samples. for i, c := range chks { - chk, err := chunkr.Chunk(c.Ref) + chk, err := chunkr.Chunk(c) if err != nil { return id, errors.Wrapf(err, "get chunk %d, series %d", c.Ref, postings.At()) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go index 0222734376d..9737de279f7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/dedup/iter.go @@ -49,7 +49,7 @@ func NewSeriesSet(set storage.SeriesSet, replicaLabels map[string]struct{}, f st // trimPushdownMarker trims the pushdown marker from the given labels. // Returns true if there was a pushdown marker. 
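Both call sites above (index rewriting and downsampling) adapt to a ChunkReader change in this Prometheus version: Chunk now takes the full chunks.Meta rather than just its Ref. The migration at a call site is mechanical:

```go
for i, c := range chks {
	chk, err := chunkr.Chunk(c) // previously: chunkr.Chunk(c.Ref)
	if err != nil {
		return errors.Wrap(err, "chunk read")
	}
	chks[i].Chunk = chk
}
```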
func trimPushdownMarker(lbls labels.Labels) (labels.Labels, bool) { - return labels.NewBuilder(lbls).Del(PushdownMarker.Name).Labels(), lbls.Has(PushdownMarker.Name) + return labels.NewBuilder(lbls).Del(PushdownMarker.Name).Labels(nil), lbls.Has(PushdownMarker.Name) } func (s *dedupSeriesSet) Next() bool { diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go index f2fb3769c4d..1964ccf13c7 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go @@ -21,22 +21,22 @@ var ErrNoSuchHost = errors.New("no such host") // // There are three possible outcomes: // -// 1. One of the permutations of the given name is recognized as -// "valid" by the DNS, in which case we consider ourselves "done" -// and that answer is returned. Note that, due to the way the DNS -// handles "name has resource records, but none of the specified type", -// the answer received may have an empty set of results. +// 1. One of the permutations of the given name is recognized as +// "valid" by the DNS, in which case we consider ourselves "done" +// and that answer is returned. Note that, due to the way the DNS +// handles "name has resource records, but none of the specified type", +// the answer received may have an empty set of results. // -// 2. All of the permutations of the given name are responded to by one of -// the servers in the "nameservers" list with the answer "that name does -// not exist" (NXDOMAIN). In that case, it can be considered -// pseudo-authoritative that there are no records for that name. +// 2. All of the permutations of the given name are responded to by one of +// the servers in the "nameservers" list with the answer "that name does +// not exist" (NXDOMAIN). In that case, it can be considered +// pseudo-authoritative that there are no records for that name. // -// 3. One or more of the names was responded to by all servers with some -// sort of error indication. In that case, we can't know if, in fact, -// there are records for the name or not, so whatever state the -// configuration is in, we should keep it that way until we know for -// sure (by, presumably, all the names getting answers in the future). +// 3. One or more of the names was responded to by all servers with some +// sort of error indication. In that case, we can't know if, in fact, +// there are records for the name or not, so whatever state the +// configuration is in, we should keep it that way until we know for +// sure (by, presumably, all the names getting answers in the future). // // Outcomes 1 and 2 are indicated by a valid response message (possibly an // empty one) and no error. Outcome 3 is indicated by an error return. The @@ -83,11 +83,11 @@ func (r *Resolver) lookupWithSearchPath(name string, qtype dns.Type) (*dns.Msg, // // A "viable answer" is one which indicates either: // -// 1. "yes, I know that name, and here are its records of the requested type" -// (RCODE==SUCCESS, ANCOUNT > 0); -// 2. "yes, I know that name, but it has no records of the requested type" -// (RCODE==SUCCESS, ANCOUNT==0); or -// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). +// 1. "yes, I know that name, and here are its records of the requested type" +// (RCODE==SUCCESS, ANCOUNT > 0); +// 2. "yes, I know that name, but it has no records of the requested type" +// (RCODE==SUCCESS, ANCOUNT==0); or +// 3. 
"I know that name doesn't exist" (RCODE==NXDOMAIN). // // A non-viable answer is "anything else", which encompasses both various // system-level problems (like network timeouts) and also diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_server.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_server.go index 5af2e60dbcb..d06c5261ddb 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_server.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/http/instrument_server.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/uber/jaeger-client-go" + "go.opentelemetry.io/otel/trace" ) // InstrumentationMiddleware holds necessary metrics to instrument an http.Server @@ -78,17 +79,36 @@ func httpInstrumentationHandler(baseLabels prometheus.Labels, metrics *defaultMe observer.Observe(time.Since(now).Seconds()) // If we find a tracingID we'll expose it as Exemplar. + var ( + traceID string + OTfound bool + ) + span := opentracing.SpanFromContext(r.Context()) if span != nil { spanCtx, ok := span.Context().(jaeger.SpanContext) if ok && spanCtx.IsSampled() { - observer.(prometheus.ExemplarObserver).ObserveWithExemplar( - time.Since(now).Seconds(), - prometheus.Labels{ - "traceID": spanCtx.TraceID().String(), - }, - ) + traceID = spanCtx.TraceID().String() } + + OTfound = ok + } + + // If OpenTracing span not found, try OTEL. + if !OTfound { + span := trace.SpanFromContext(r.Context()) + if span != nil { + traceID = span.SpanContext().SpanID().String() + } + } + + if traceID != "" { + observer.(prometheus.ExemplarObserver).ObserveWithExemplar( + time.Since(now).Seconds(), + prometheus.Labels{ + "traceID": traceID, + }, + ) } }), ), diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go index a619b122e97..3e4e81f8ff3 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/tx_gauge.go @@ -78,7 +78,8 @@ func (tx *TxGaugeVec) Collect(ch chan<- prometheus.Metric) { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (tx *TxGaugeVec) With(labels prometheus.Labels) prometheus.Gauge { if tx.tx == nil { tx.ResetTx() @@ -89,7 +90,8 @@ func (tx *TxGaugeVec) With(labels prometheus.Labels) prometheus.Gauge { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. 
Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (tx *TxGaugeVec) WithLabelValues(lvs ...string) prometheus.Gauge { if tx.tx == nil { tx.ResetTx() diff --git a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go index 35443d908ce..ae6ecf9872f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go +++ b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go @@ -32,13 +32,12 @@ var ( // // Example of use: // -// g := gate.New(r, 5) -// -// if err := g.Start(ctx); err != nil { -// return -// } -// defer g.Done() +// g := gate.New(r, 5) // +// if err := g.Start(ctx); err != nil { +// return +// } +// defer g.Done() type Gate interface { // Start initiates a new request and waits until it's our turn to fulfill a request. Start(ctx context.Context) error diff --git a/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go b/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go index 352893367df..c6cdead58ba 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go +++ b/vendor/github.com/thanos-io/thanos/pkg/querysharding/analyzer.go @@ -26,15 +26,16 @@ func NewQueryAnalyzer() *QueryAnalyzer { // Analyze analyzes a query and returns a QueryAnalysis. // Analyze uses the following algorithm: -// * if a query has subqueries, such as label_join or label_replace, -// or has functions which cannot be sharded, then treat the query as non shardable. -// * if the query's root expression has grouping labels, -// then treat the query as shardable by those labels. -// * if the query's root expression has no grouping labels, -// then walk the query and find the least common labelset -// used in grouping expressions. If non-empty, treat the query -// as shardable by those labels. -// * otherwise, treat the query as non-shardable. +// - if a query has subqueries, such as label_join or label_replace, +// or has functions which cannot be sharded, then treat the query as non shardable. +// - if the query's root expression has grouping labels, +// then treat the query as shardable by those labels. +// - if the query's root expression has no grouping labels, +// then walk the query and find the least common labelset +// used in grouping expressions. If non-empty, treat the query +// as shardable by those labels. +// - otherwise, treat the query as non-shardable. +// // The le label is excluded from sharding. 
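The reformatted doc comment above also summarizes how Analyze decides shardability, which is easiest to see on concrete queries. The sketch below only uses NewQueryAnalyzer and Analyze as declared in this file; the example queries and the expected outcomes noted in comments follow the rules listed in the doc comment:

```go
a := querysharding.NewQueryAnalyzer()

// Root aggregation groups by "pod", so the query is shardable by that label.
byPod, err := a.Analyze(`sum by (pod) (rate(container_cpu_usage_seconds_total[5m]))`)
if err != nil {
	return err
}

// label_replace rewrites labels, so the analyzer treats the query as non-shardable.
rewritten, err := a.Analyze(`label_replace(up, "foo", "$1", "instance", "(.*)")`)
if err != nil {
	return err
}
_, _ = byPod, rewritten
```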
func (a *QueryAnalyzer) Analyze(query string) (QueryAnalysis, error) { expr, err := parser.ParseExpr(query) diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go index 52d1a2e8a64..5e2cb11110d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go @@ -45,9 +45,9 @@ func NewRecordingRule(r *RecordingRule) *Rule { // Compare compares equal recording rules r1 and r2 and returns: // -// < 0 if r1 < r2 if rule r1 is lexically before rule r2 -// 0 if r1 == r2 -// > 0 if r1 > r2 if rule r1 is lexically after rule r2 +// < 0 if r1 < r2 if rule r1 is lexically before rule r2 +// 0 if r1 == r2 +// > 0 if r1 > r2 if rule r1 is lexically after rule r2 // // More formally, the ordering is determined in the following order: // @@ -133,9 +133,9 @@ func (r *Rule) GetLastEvaluation() time.Time { // Compare compares recording and alerting rules r1 and r2 and returns: // -// < 0 if r1 < r2 if rule r1 is not equal and lexically before rule r2 -// 0 if r1 == r2 if rule r1 is logically equal to r2 (r1 and r2 are the "same" rules) -// > 0 if r1 > r2 if rule r1 is not equal and lexically after rule r2 +// < 0 if r1 < r2 if rule r1 is not equal and lexically before rule r2 +// 0 if r1 == r2 if rule r1 is logically equal to r2 (r1 and r2 are the "same" rules) +// > 0 if r1 > r2 if rule r1 is not equal and lexically after rule r2 // // More formally, ordering and equality is determined in the following order: // @@ -188,9 +188,9 @@ func (r *RuleGroups) MarshalJSON() ([]byte, error) { // Compare compares rule group x and y and returns: // -// < 0 if x < y if rule group r1 is not equal and lexically before rule group r2 -// 0 if x == y if rule group r1 is logically equal to r2 (r1 and r2 are the "same" rule groups) -// > 0 if x > y if rule group r1 is not equal and lexically after rule group r2 +// < 0 if x < y if rule group r1 is not equal and lexically before rule group r2 +// 0 if x == y if rule group r1 is logically equal to r2 (r1 and r2 are the "same" rule groups) +// > 0 if x > y if rule group r1 is not equal and lexically after rule group r2 func (r1 *RuleGroup) Compare(r2 *RuleGroup) int { return strings.Compare(r1.Key(), r2.Key()) } @@ -293,9 +293,9 @@ func (x *AlertState) MarshalJSON() ([]byte, error) { // Compare compares alert state x and y and returns: // -// < 0 if x < y (alert state x is more critical than alert state y) -// 0 if x == y -// > 0 if x > y (alert state x is less critical than alert state y) +// < 0 if x < y (alert state x is more critical than alert state y) +// 0 if x == y +// > 0 if x > y (alert state x is less critical than alert state y) // // For sorting this makes sure that more "critical" alert states come first. 
func (x AlertState) Compare(y AlertState) int { @@ -304,9 +304,9 @@ func (x AlertState) Compare(y AlertState) int { // Compare compares two equal alerting rules a1 and a2 and returns: // -// < 0 if a1 < a2 if rule a1 is lexically before rule a2 -// 0 if a1 == a2 -// > 0 if a1 > a2 if rule a1 is lexically after rule a2 +// < 0 if a1 < a2 if rule a1 is lexically before rule a2 +// 0 if a1 == a2 +// > 0 if a1 > a2 if rule a1 is lexically after rule a2 // // More formally, the ordering is determined in the following order: // diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go index 2cbaff2c524..2089c9b3937 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go @@ -36,16 +36,16 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -/// AlertState represents state of the alert. Has to match 1:1 Prometheus AlertState: +// / AlertState represents state of the alert. Has to match 1:1 Prometheus AlertState: // // StateInactive is the state of an alert that is neither firing nor pending. -//StateInactive AlertState = iota +// StateInactive AlertState = iota // StatePending is the state of an alert that has been active for less than // the configured threshold duration. -//StatePending +// StatePending // StateFiring is the state of an alert that has been active for longer than // the configured threshold duration. -//StateFiring +// StateFiring type AlertState int32 const ( @@ -229,12 +229,12 @@ func (*RulesResponse) XXX_OneofWrappers() []interface{} { } } -/// RuleGroups is set of rule groups. -/// This and below APIs are meant to be used for unmarshaling and marshsaling rules from/to Prometheus API. -/// That's why json tag has to be customized and matching https://github.com/prometheus/prometheus/blob/c530b4b456cc5f9ec249f771dff187eb7715dc9b/web/api/v1/api.go#L955 -/// NOTE: See rules_custom_test.go for compatibility tests. -/// -/// For rule parsing from YAML configuration other struct is used: https://github.com/prometheus/prometheus/blob/20b1f596f6fb16107ef0c244d240b0ad6da36829/pkg/rulefmt/rulefmt.go#L105 +// / RuleGroups is set of rule groups. +// / This and below APIs are meant to be used for unmarshaling and marshsaling rules from/to Prometheus API. +// / That's why json tag has to be customized and matching https://github.com/prometheus/prometheus/blob/c530b4b456cc5f9ec249f771dff187eb7715dc9b/web/api/v1/api.go#L955 +// / NOTE: See rules_custom_test.go for compatibility tests. +// / +// / For rule parsing from YAML configuration other struct is used: https://github.com/prometheus/prometheus/blob/20b1f596f6fb16107ef0c244d240b0ad6da36829/pkg/rulefmt/rulefmt.go#L105 type RuleGroups struct { Groups []*RuleGroup `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups"` } @@ -272,7 +272,7 @@ func (m *RuleGroups) XXX_DiscardUnknown() { var xxx_messageInfo_RuleGroups proto.InternalMessageInfo -/// RuleGroup has info for rules which are part of a group. +// / RuleGroup has info for rules which are part of a group. 
type RuleGroup struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name"` File string `protobuf:"bytes,2,opt,name=file,proto3" json:"file"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 468b61e6936..ab8665f4519 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -9,36 +9,36 @@ // // For repeat executes, use Repeat: // -// err := runutil.Repeat(10*time.Second, stopc, func() error { -// // ... -// }) +// err := runutil.Repeat(10*time.Second, stopc, func() error { +// // ... +// }) // // Retry starts executing closure function f until no error is returned from f: // -// err := runutil.Retry(10*time.Second, stopc, func() error { -// // ... -// }) +// err := runutil.Retry(10*time.Second, stopc, func() error { +// // ... +// }) // // For logging an error on each f error, use RetryWithLog: // -// err := runutil.RetryWithLog(logger, 10*time.Second, stopc, func() error { -// // ... -// }) +// err := runutil.RetryWithLog(logger, 10*time.Second, stopc, func() error { +// // ... +// }) // // Another use case for runutil package is when you want to close a `Closer` interface. As we all know, we should close all implements of `Closer`, such as *os.File. Commonly we will use: // -// defer closer.Close() +// defer closer.Close() // // The problem is that Close() usually can return important error e.g for os.File the actual file flush might happen (and fail) on `Close` method. It's important to *always* check error. Thanos provides utility functions to log every error like those, allowing to put them in convenient `defer`: // -// defer runutil.CloseWithLogOnErr(logger, closer, "log format message") +// defer runutil.CloseWithLogOnErr(logger, closer, "log format message") // // For capturing error, use CloseWithErrCapture: // -// var err error -// defer runutil.CloseWithErrCapture(&err, closer, "log format message") +// var err error +// defer runutil.CloseWithErrCapture(&err, closer, "log format message") // -// // ... +// // ... // // If Close() returns error, err will capture it and return by argument. // diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go deleted file mode 100644 index 297434ef728..00000000000 --- a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package store - -import ( - "context" - "fmt" - "io" - "math" - "sync" - - "github.com/go-kit/log" - grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/tracing" - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/model/labels" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/errutil" - "github.com/thanos-io/thanos/pkg/runutil" - "github.com/thanos-io/thanos/pkg/store/labelpb" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/tracing" -) - -// InfoStoreServer packs a store server with extra methods in order -// to be able to obtain information about a particular store. 
-type InfoStoreServer interface { - storepb.StoreServer - LabelSet() []labelpb.ZLabelSet - TimeRange() (int64, int64) -} - -// MultiTSDBStore implements the Store interface backed by multiple TSDBStore instances. -// TODO(bwplotka): Remove this and use Proxy instead. Details: https://github.com/thanos-io/thanos/issues/2864 -type MultiTSDBStore struct { - logger log.Logger - component component.SourceStoreAPI - tsdbStores func() map[string]InfoStoreServer -} - -// NewMultiTSDBStore creates a new MultiTSDBStore. -func NewMultiTSDBStore(logger log.Logger, _ prometheus.Registerer, component component.SourceStoreAPI, tsdbStores func() map[string]InfoStoreServer) *MultiTSDBStore { - if logger == nil { - logger = log.NewNopLogger() - } - return &MultiTSDBStore{ - logger: logger, - component: component, - tsdbStores: tsdbStores, - } -} - -// Info returns store merged information about the underlying TSDBStore instances. -func (s *MultiTSDBStore) Info(ctx context.Context, req *storepb.InfoRequest) (*storepb.InfoResponse, error) { - stores := s.tsdbStores() - - resp := &storepb.InfoResponse{ - StoreType: s.component.ToProto(), - } - if len(stores) == 0 { - return resp, nil - } - - infos := make([]*storepb.InfoResponse, 0, len(stores)) - for tenant, store := range stores { - info, err := store.Info(ctx, req) - if err != nil { - return nil, errors.Wrapf(err, "get info for tenant %s", tenant) - } - infos = append(infos, info) - } - - resp.MinTime = infos[0].MinTime - resp.MaxTime = infos[0].MaxTime - - for i := 1; i < len(infos); i++ { - if resp.MinTime > infos[i].MinTime { - resp.MinTime = infos[i].MinTime - } - if resp.MaxTime < infos[i].MaxTime { - resp.MaxTime = infos[i].MaxTime - } - } - - // We can rely on every underlying TSDB to only have one labelset, so this - // will always allocate the correct length immediately. - resp.LabelSets = make([]labelpb.ZLabelSet, 0, len(infos)) - for _, info := range infos { - resp.LabelSets = append(resp.LabelSets, info.LabelSets...) - } - - return resp, nil -} - -func (s *MultiTSDBStore) LabelSet() []labelpb.ZLabelSet { - stores := s.tsdbStores() - if len(stores) == 0 { - return []labelpb.ZLabelSet{} - } - - // We can rely on every underlying TSDB to only have one labelset, so this - // will always allocate the correct length immediately. - lsets := make([]labelpb.ZLabelSet, 0, len(stores)) - for _, store := range stores { - lsets = append(lsets, store.LabelSet()...) - } - - return lsets -} - -func (s *MultiTSDBStore) TimeRange() (int64, int64) { - stores := s.tsdbStores() - if len(stores) == 0 { - return math.MinInt64, math.MaxInt64 - } - - var minTime, maxTime int64 = math.MaxInt64, math.MinInt64 - for _, store := range stores { - storeMinTime, storeMaxTime := store.TimeRange() - if minTime > storeMinTime { - minTime = storeMinTime - } - if maxTime < storeMaxTime { - maxTime = storeMaxTime - } - } - - return minTime, maxTime -} - -type tenantSeriesSetServer struct { - grpc.ServerStream - - ctx context.Context - - directCh directSender - recv chan *storepb.Series - cur *storepb.Series - - err error - tenant string - - closers []io.Closer -} - -// TODO(bwplotka): Remove tenant awareness; keep it simple with single functionality. -// Details https://github.com/thanos-io/thanos/issues/2864. 
-func newTenantSeriesSetServer( - ctx context.Context, - tenant string, - directCh directSender, -) *tenantSeriesSetServer { - return &tenantSeriesSetServer{ - ctx: ctx, - tenant: tenant, - directCh: directCh, - recv: make(chan *storepb.Series), - } -} - -func (s *tenantSeriesSetServer) Context() context.Context { return s.ctx } - -func (s *tenantSeriesSetServer) Series(store storepb.StoreServer, r *storepb.SeriesRequest) { - var err error - tracing.DoInSpan(s.ctx, "multitsdb_tenant_series", func(_ context.Context) { - err = store.Series(r, s) - }, opentracing.Tags{"tenant.id": s.tenant}) - if err != nil { - if r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT { - s.err = errors.Wrapf(err, "get series for tenant %s", s.tenant) - } else { - // Consistently prefix tenant specific warnings as done in various other places. - err = errors.New(prefixTenantWarning(s.tenant, err.Error())) - s.directCh.send(storepb.NewWarnSeriesResponse(err)) - } - } - close(s.recv) -} - -func (s *tenantSeriesSetServer) Send(r *storepb.SeriesResponse) error { - series := r.GetSeries() - if series == nil { - // Proxy non series responses directly to client - s.directCh.send(r) - return nil - } - - // TODO(bwplotka): Consider avoid copying / learn why it has to copied. - chunks := make([]storepb.AggrChunk, len(series.Chunks)) - copy(chunks, series.Chunks) - - // For series, pass it to our AggChunkSeriesSet. - select { - case <-s.ctx.Done(): - return s.ctx.Err() - case s.recv <- &storepb.Series{ - Labels: series.Labels, - Chunks: chunks, - }: - return nil - } -} - -func (s *tenantSeriesSetServer) Delegate(closer io.Closer) { - s.closers = append(s.closers, closer) -} - -func (s *tenantSeriesSetServer) Close() error { - var merr errutil.MultiError - for _, c := range s.closers { - merr.Add(c.Close()) - } - return merr.Err() -} - -func (s *tenantSeriesSetServer) Next() (ok bool) { - s.cur, ok = <-s.recv - return ok -} - -func (s *tenantSeriesSetServer) At() (labels.Labels, []storepb.AggrChunk) { - if s.cur == nil { - return nil, nil - } - return s.cur.PromLabels(), s.cur.Chunks -} - -func (s *tenantSeriesSetServer) Err() error { return s.err } - -// Series returns all series for a requested time range and label matcher. The -// returned data may exceed the requested time bounds. The data returned may -// have been read and merged from multiple underlying TSDBStore instances. -func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - span, ctx := tracing.StartSpan(srv.Context(), "multitsdb_series") - defer span.Finish() - - stores := s.tsdbStores() - if len(stores) == 0 { - return nil - } - - g, gctx := errgroup.WithContext(ctx) - - // Allow to buffer max 10 series response. - // Each might be quite large (multi chunk long series given by sidecar). - respSender, respCh := newCancelableRespChannel(gctx, 10) - - var closers []io.Closer - g.Go(func() error { - // This go routine is responsible for calling store's Series concurrently. Merged results - // are passed to respCh and sent concurrently to client (if buffer of 10 have room). - // When this go routine finishes or is canceled, respCh channel is closed. 
- - var ( - seriesSet []storepb.SeriesSet - wg = &sync.WaitGroup{} - ) - - defer func() { - wg.Wait() - close(respCh) - }() - - for tenant, store := range stores { - store := store - seriesCtx, cancelSeries := context.WithCancel(ctx) - seriesCtx = grpc_opentracing.ClientAddContextTags(seriesCtx, opentracing.Tags{ - "tenant": tenant, - }) - defer cancelSeries() - ss := newTenantSeriesSetServer(seriesCtx, tenant, respSender) - wg.Add(1) - go func() { - defer wg.Done() - ss.Series(store, r) - }() - - closers = append(closers, ss) - seriesSet = append(seriesSet, ss) - } - - mergedSet := storepb.MergeSeriesSets(seriesSet...) - for mergedSet.Next() { - lset, chks := mergedSet.At() - respSender.send(storepb.NewSeriesResponse(&storepb.Series{ - Labels: labelpb.ZLabelsFromPromLabels(lset), - Chunks: chks, - })) - } - return mergedSet.Err() - }) - g.Go(func() error { - // Go routine for gathering merged responses and sending them over to client. It stops when - // respCh channel is closed OR on error from client. - for resp := range respCh { - if err := srv.Send(resp); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) - } - } - return nil - }) - err := g.Wait() - for _, c := range closers { - runutil.CloseWithLogOnErr(s.logger, c, "close tenant series request") - } - return err - -} - -// LabelNames returns all known label names constrained by the matchers. -func (s *MultiTSDBStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { - span, ctx := tracing.StartSpan(ctx, "multitsdb_label_names") - defer span.Finish() - - names := map[string]struct{}{} - warnings := map[string]struct{}{} - - stores := s.tsdbStores() - for tenant, store := range stores { - r, err := store.LabelNames(ctx, req) - if err != nil { - return nil, errors.Wrapf(err, "get label names for tenant %s", tenant) - } - - for _, l := range r.Names { - names[l] = struct{}{} - } - - for _, l := range r.Warnings { - warnings[prefixTenantWarning(tenant, l)] = struct{}{} - } - } - - return &storepb.LabelNamesResponse{ - Names: keys(names), - Warnings: keys(warnings), - }, nil -} - -func prefixTenantWarning(tenant, s string) string { - return fmt.Sprintf("[%s] %s", tenant, s) -} - -func keys(m map[string]struct{}) []string { - res := make([]string, 0, len(m)) - for k := range m { - res = append(res, k) - } - - return res -} - -// LabelValues returns all known label values for a given label name. 
-func (s *MultiTSDBStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - span, ctx := tracing.StartSpan(ctx, "multitsdb_label_values") - defer span.Finish() - - values := map[string]struct{}{} - warnings := map[string]struct{}{} - - stores := s.tsdbStores() - for tenant, store := range stores { - r, err := store.LabelValues(ctx, req) - if err != nil { - return nil, errors.Wrapf(err, "get label values for tenant %s", tenant) - } - - for _, l := range r.Values { - values[l] = struct{}{} - } - - for _, l := range r.Warnings { - warnings[prefixTenantWarning(tenant, l)] = struct{}{} - } - } - - return &storepb.LabelValuesResponse{ - Values: keys(values), - Warnings: keys(warnings), - }, nil -} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index a68fab71ad8..508b4c62cab 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -13,6 +13,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -54,9 +55,12 @@ type Client interface { // received series before deduplication. SendsSortedSeries() bool + // String returns the string representation of the store client. String() string - // Addr returns address of a Client. - Addr() string + + // Addr returns address of the store client. If second parameter is true, the client + // represents a local client (server-as-client) and has no remote address. + Addr() (addr string, isLocalClient bool) } // ProxyStore implements the store API that proxies request to all given underlying stores. @@ -231,25 +235,6 @@ func (s *ProxyStore) TimeRange() (int64, int64) { return minTime, maxTime } -// cancelableRespSender is a response channel that does need to be exhausted on cancel. -type cancelableRespSender struct { - ctx context.Context - ch chan<- *storepb.SeriesResponse -} - -func newCancelableRespChannel(ctx context.Context, buffer int) (*cancelableRespSender, chan *storepb.SeriesResponse) { - respCh := make(chan *storepb.SeriesResponse, buffer) - return &cancelableRespSender{ctx: ctx, ch: respCh}, respCh -} - -// send or return on cancel. -func (s cancelableRespSender) send(r *storepb.SeriesResponse) { - select { - case <-s.ctx.Done(): - case s.ch <- r: - } -} - func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be // tiggered by tracing span to reduce cognitive load. @@ -345,10 +330,6 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. return nil } -type directSender interface { - send(*storepb.SeriesResponse) -} - // storeMatches returns boolean if the given store may hold data for the given label matchers, time ranges and debug store matches gathered from context. // It also produces tracing span. 
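
The Client interface change above widens Addr() to return both the address and an isLocalClient flag, so callers such as storeMatchDebugMetadata (below) can skip __address__ matching for local, server-as-client stores. A minimal, hypothetical sketch of that two-value contract (remoteStore and localStore are illustrative stand-ins, not Thanos types):

```go
package main

import "fmt"

// Client mirrors, in reduced form, the updated store Client contract above:
// Addr now also reports whether the client is a local (server-as-client)
// store with no remote address.
type Client interface {
	Addr() (addr string, isLocalClient bool)
}

// remoteStore and localStore are hypothetical implementations for illustration.
type remoteStore struct{ addr string }

func (r remoteStore) Addr() (string, bool) { return r.addr, false }

type localStore struct{}

func (localStore) Addr() (string, bool) { return "", true }

func main() {
	for _, c := range []Client{remoteStore{addr: "10.0.0.1:10901"}, localStore{}} {
		if addr, isLocal := c.Addr(); isLocal {
			fmt.Println("local store: skip __address__ matching")
		} else {
			fmt.Println("remote store at", addr)
		}
	}
}
```
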
func storeMatches(ctx context.Context, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { @@ -384,12 +365,17 @@ func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) ( return true, "" } + addr, isLocal := s.Addr() + if isLocal { + return false, "the store is not remote, cannot match __address__" + } + match := false for _, sm := range storeDebugMatchers { - match = match || labelSetsMatch(sm, labels.FromStrings("__address__", s.Addr())) + match = match || labelSetsMatch(sm, labels.FromStrings("__address__", addr)) } if !match { - return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", s.Addr(), storeDebugMatchers) + return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", addr, storeDebugMatchers) } return true, "" } @@ -486,11 +472,24 @@ func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequ mtx sync.Mutex g, gctx = errgroup.WithContext(ctx) storeDebugMsgs []string + span opentracing.Span ) for _, st := range s.stores() { st := st + storeAddr, isLocalStore := st.Addr() + storeID := labelpb.PromLabelSetsToString(st.LabelSets()) + if storeID == "" { + storeID = "Store Gateway" + } + span, gctx = tracing.StartSpan(gctx, "proxy.label_values", tracing.Tags{ + "store.id": storeID, + "store.addr": storeAddr, + "store.is_local": isLocalStore, + }) + defer span.Finish() + // We might be able to skip the store if its meta information indicates it cannot have series matching our query. if ok, reason := storeMatches(gctx, st, r.Start, r.End); !ok { storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to %v", st, reason)) @@ -507,13 +506,14 @@ func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequ Matchers: r.Matchers, }) if err != nil { - err = errors.Wrapf(err, "fetch label values from store %s", st) + msg := "fetch label values from store %s" + err = errors.Wrapf(err, msg, st) if r.PartialResponseDisabled { return err } mtx.Lock() - warnings = append(warnings, errors.Wrap(err, "fetch label values").Error()) + warnings = append(warnings, errors.Wrapf(err, msg, st).Error()) mtx.Unlock() return nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go index c2a79c153e1..822633b6a7b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_heap.go @@ -112,7 +112,7 @@ func (d *dedupResponseHeap) Next() bool { func (d *dedupResponseHeap) At() *storepb.SeriesResponse { if len(d.responses) == 0 { - return nil + panic("BUG: At() called with no responses; please call At() only if Next() returns true") } else if len(d.responses) == 1 { return d.responses[0] } @@ -293,6 +293,19 @@ func (l *lazyRespSet) Empty() bool { l.bufferedResponsesMtx.Lock() defer l.bufferedResponsesMtx.Unlock() + // NOTE(GiedriusS): need to wait here for at least one + // response so that we could build the heap properly. 
+ if l.noMoreData && len(l.bufferedResponses) == 0 { + return true + } + + for len(l.bufferedResponses) == 0 { + l.dataOrFinishEvent.Wait() + if l.noMoreData && len(l.bufferedResponses) == 0 { + break + } + } + return len(l.bufferedResponses) == 0 && l.noMoreData } @@ -494,18 +507,20 @@ func newAsyncRespSet(ctx context.Context, var span opentracing.Span var closeSeries context.CancelFunc + storeAddr, isLocalStore := st.Addr() storeID := labelpb.PromLabelSetsToString(st.LabelSets()) if storeID == "" { storeID = "Store Gateway" } seriesCtx := grpc_opentracing.ClientAddContextTags(ctx, opentracing.Tags{ - "target": st.Addr(), + "target": storeAddr, }) span, seriesCtx = tracing.StartSpan(seriesCtx, "proxy.series", tracing.Tags{ - "store.id": storeID, - "store.addr": st.Addr(), + "store.id": storeID, + "store.is_local": isLocalStore, + "store.addr": storeAddr, }) seriesCtx, closeSeries = context.WithCancel(seriesCtx) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go index 3dbc940ab16..ee028f4a33b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go @@ -26,7 +26,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -/// PartialResponseStrategy controls partial response handling. +// / PartialResponseStrategy controls partial response handling. type PartialResponseStrategy int32 const ( diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go index 4ace2e85c57..626f004c62b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/migration/bridge.go @@ -11,10 +11,9 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/opentracing/opentracing-go" - ot_propagator "go.opentelemetry.io/contrib/propagators/ot" + "go.opentelemetry.io/contrib/propagators/autoprop" "go.opentelemetry.io/otel" bridge "go.opentelemetry.io/otel/bridge/opentracing" - "go.opentelemetry.io/otel/propagation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) @@ -27,18 +26,17 @@ import ( // NOTE: After instrumentation migration is finished, this bridge should be // removed. 
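
The lazyRespSet.Empty() change above makes the method block on the set's condition variable until at least one response is buffered or the stream signals completion, rather than reporting emptiness immediately. A reduced, hypothetical sketch of that wait pattern (respSet and its fields are stand-ins, not the real lazyRespSet):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// respSet is a simplified stand-in: a buffer guarded by a mutex, with a
// condition variable signalled when data arrives or the upstream finishes.
type respSet struct {
	mu         sync.Mutex
	cond       *sync.Cond
	buffered   []string
	noMoreData bool
}

func newRespSet() *respSet {
	r := &respSet{}
	r.cond = sync.NewCond(&r.mu)
	return r
}

func (r *respSet) push(s string) {
	r.mu.Lock()
	r.buffered = append(r.buffered, s)
	r.mu.Unlock()
	r.cond.Signal()
}

func (r *respSet) finish() {
	r.mu.Lock()
	r.noMoreData = true
	r.mu.Unlock()
	r.cond.Signal()
}

// Empty mirrors the waiting behaviour added above: block until at least one
// response is buffered or the set is marked finished.
func (r *respSet) Empty() bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	for len(r.buffered) == 0 && !r.noMoreData {
		r.cond.Wait()
	}
	return len(r.buffered) == 0 && r.noMoreData
}

func main() {
	r := newRespSet()
	go func() {
		time.Sleep(10 * time.Millisecond)
		r.push("series-1")
	}()
	fmt.Println("empty:", r.Empty()) // waits for the push, then prints false
}
```
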
func Bridge(tp *tracesdk.TracerProvider, l log.Logger) (opentracing.Tracer, io.Closer) { - compositePropagator := propagation.NewCompositeTextMapPropagator(ot_propagator.OT{}, propagation.TraceContext{}, propagation.Baggage{}) otel.SetErrorHandler(otelErrHandler(func(err error) { level.Error(l).Log("msg", "OpenTelemetry ErrorHandler", "err", err) })) - otel.SetTextMapPropagator(compositePropagator) + otel.SetTextMapPropagator(autoprop.NewTextMapPropagator()) otel.SetTracerProvider(tp) bridgeTracer, _ := bridge.NewTracerPair(tp.Tracer("")) bridgeTracer.SetWarningHandler(func(warn string) { level.Warn(l).Log("msg", "OpenTelemetry BridgeWarningHandler", "warn", warn) }) - bridgeTracer.SetTextMapPropagator(propagation.TraceContext{}) + bridgeTracer.SetTextMapPropagator(autoprop.NewTextMapPropagator()) tpShutdownFunc := func() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go new file mode 100644 index 00000000000..5f4c081b54e --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/testutil.go @@ -0,0 +1,71 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package tracing + +import ( + "testing" + + "github.com/thanos-io/thanos/pkg/testutil" + + opentracing "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/sdk/trace/tracetest" +) + +func CountSpans_ClientEnablesTracing(t *testing.T, exp *tracetest.InMemoryExporter, clientRoot, srvRoot, srvChild opentracing.Span) { + testutil.Equals(t, 0, len(exp.GetSpans())) + + srvChild.Finish() + testutil.Equals(t, 1, len(exp.GetSpans())) + testutil.Equals(t, 1, CountSampledSpans(exp.GetSpans())) + + srvRoot.Finish() + testutil.Equals(t, 2, len(exp.GetSpans())) + testutil.Equals(t, 2, CountSampledSpans(exp.GetSpans())) + + clientRoot.Finish() + testutil.Equals(t, 3, len(exp.GetSpans())) + testutil.Equals(t, 3, CountSampledSpans(exp.GetSpans())) +} + +func ContextTracing_ClientDisablesTracing(t *testing.T, exp *tracetest.InMemoryExporter, clientRoot, srvRoot, srvChild opentracing.Span) { + testutil.Equals(t, 0, len(exp.GetSpans())) + + // Since we are not recording neither sampling, no spans should show up. + srvChild.Finish() + testutil.Equals(t, 0, len(exp.GetSpans())) + + srvRoot.Finish() + testutil.Equals(t, 0, len(exp.GetSpans())) + + clientRoot.Finish() + testutil.Equals(t, 0, len(exp.GetSpans())) +} + +func ContextTracing_ForceTracing(t *testing.T, exp *tracetest.InMemoryExporter, clientRoot, srvRoot, srvChild opentracing.Span) { + testutil.Equals(t, 0, len(exp.GetSpans())) + + srvChild.Finish() + testutil.Equals(t, 1, len(exp.GetSpans())) + testutil.Equals(t, 1, CountSampledSpans(exp.GetSpans())) + + srvRoot.Finish() + testutil.Equals(t, 2, len(exp.GetSpans())) + testutil.Equals(t, 2, CountSampledSpans(exp.GetSpans())) + + clientRoot.Finish() + testutil.Equals(t, 3, len(exp.GetSpans())) + testutil.Equals(t, 3, CountSampledSpans(exp.GetSpans())) +} + +// Utility function for use with tests in pkg/tracing. 
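
The Bridge change above drops the hand-assembled composite propagator (OT, TraceContext, Baggage) in favor of autoprop.NewTextMapPropagator(), so the propagator set can be selected via the OTEL_PROPAGATORS environment variable. A small usage sketch, assuming the defaults documented in the autoprop package further below:

```go
package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/contrib/propagators/autoprop"
	"go.opentelemetry.io/otel"
)

func main() {
	// With OTEL_PROPAGATORS unset, autoprop composes TraceContext and Baggage,
	// so the bridge keeps W3C trace propagation by default.
	otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())
	fmt.Println(otel.GetTextMapPropagator().Fields())

	// The environment variable takes precedence: here the selected set becomes
	// B3 plus Jaeger, without any code change in the bridge.
	os.Setenv("OTEL_PROPAGATORS", "b3,jaeger")
	fmt.Println(autoprop.NewTextMapPropagator().Fields())
}
```
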
+func CountSampledSpans(ss tracetest.SpanStubs) int { + var count int + for _, s := range ss { + if s.SpanContext.IsSampled() { + count++ + } + } + + return count +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 00000000000..c40973c8d43 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonoptions defines the optional configurations for the BSON codecs. +package bsonoptions diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 807c2d1c1ca..d0337f3a5e4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -166,10 +166,10 @@ const ( // end of the request. // // Valid events are: -// * ReadEvents: Record the number of bytes read after every http.Request.Body.Read -// using the ReadBytesKey -// * WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write -// using the WriteBytesKey +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey func WithMessageEvents(events ...event) Option { return optionFunc(func(c *config) { for _, e := range events { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index a172a651664..05c1e08f161 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.34.0" + return "0.36.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/github.com/googleapis/go-type-adapters/LICENSE b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/LICENSE similarity index 99% rename from vendor/github.com/googleapis/go-type-adapters/LICENSE rename to vendor/go.opentelemetry.io/contrib/propagators/autoprop/LICENSE index ff9ad4530f5..261eeb9e9f8 100644 --- a/vendor/github.com/googleapis/go-type-adapters/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -193,7 +192,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/go.opentelemetry.io/contrib/propagators/autoprop/doc.go b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/doc.go new file mode 100644 index 00000000000..0f09e745747 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/doc.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package autoprop provides an OpenTelemetry TextMapPropagator creation +// function. The OpenTelemetry specification states that the default +// TextMapPropagator needs to be a no-operation implementation. The +// opentelemetry-go project adheres to this requirement. However, for systems +// that perform propagation this default is not ideal. This package provides a +// TextMapPropagator with useful defaults (a combined TraceContext and Baggage +// TextMapPropagator), and supports environment overrides using the +// OTEL_PROPAGATORS environment variable. +package autoprop // import "go.opentelemetry.io/contrib/propagators/autoprop" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/autoprop/propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/propagator.go new file mode 100644 index 00000000000..29ac8580a64 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/propagator.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autoprop // import "go.opentelemetry.io/contrib/propagators/autoprop" + +import ( + "errors" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" +) + +// otelPropagatorsEnvKey is the environment variable name identifying +// propagators to use. +const otelPropagatorsEnvKey = "OTEL_PROPAGATORS" + +// NewTextMapPropagator returns a new TextMapPropagator composited by props or +// one defined by the OTEL_PROPAGATORS environment variable. The +// TextMapPropagator defined by OTEL_PROPAGATORS, if set, will take precedence +// to the once composited by props. +// +// The propagators supported with the OTEL_PROPAGATORS environment variable by +// default are: tracecontext, baggage, b3, b3multi, jaeger, xray, ottrace, and +// none. 
Each of these values, and their combination, are supported in +// conformance with the OpenTelemetry specification. See +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration +// for more information. +// +// The supported environment variable propagators can be extended to include +// custom 3rd-party TextMapPropagator. See the RegisterTextMapPropagator +// function for more information. +// +// If OTEL_PROPAGATORS is not defined and props is no provided, the returned +// TextMapPropagator will be a composite of the TraceContext and Baggage +// propagators. +func NewTextMapPropagator(props ...propagation.TextMapPropagator) propagation.TextMapPropagator { + // Environment variable defined propagator has precedence over arguments. + envProp, err := parseEnv() + if err != nil { + // Communicate to the user their supplied value will not be used. + otel.Handle(err) + } + if envProp != nil { + return envProp + } + + switch len(props) { + case 0: + // Default to TraceContext and Baggage. + return propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, propagation.Baggage{}, + ) + case 1: + // Do not add overhead with a composite propagator wrapping a single + // propagator, return it directly. + return props[0] + default: + return propagation.NewCompositeTextMapPropagator(props...) + } +} + +// errUnknownPropagator is returned when an unknown propagator name is used in +// the OTEL_PROPAGATORS environment variable. +var errUnknownPropagator = errors.New("unknown propagator") + +// parseEnv returns the composite TextMapPropagators defined by the +// OTEL_PROPAGATORS environment variable. A nil TextMapPropagator is returned +// if no propagator is defined for the environment variable. A no-op +// TextMapPropagator will be returned if "none" is defined anywhere in the +// environment variable. +func parseEnv() (propagation.TextMapPropagator, error) { + propStrs, defined := os.LookupEnv(otelPropagatorsEnvKey) + if !defined { + return nil, nil + } + return TextMapPropagator(strings.Split(propStrs, ",")...) +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/autoprop/registry.go b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/registry.go new file mode 100644 index 00000000000..a831773f3d0 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/autoprop/registry.go @@ -0,0 +1,169 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autoprop // import "go.opentelemetry.io/contrib/propagators/autoprop" + +import ( + "errors" + "fmt" + "strings" + "sync" + + "go.opentelemetry.io/contrib/propagators/aws/xray" + "go.opentelemetry.io/contrib/propagators/b3" + "go.opentelemetry.io/contrib/propagators/jaeger" + "go.opentelemetry.io/contrib/propagators/ot" + "go.opentelemetry.io/otel/propagation" +) + +// none is the special "propagator" name that means no propagator shall be +// configured. 
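
Beyond the environment-variable lookup shown above, the registry added below lets third-party propagators be named in OTEL_PROPAGATORS. A hypothetical sketch of registering and composing propagators by name (the "mycorp" name and the reuse of Baggage as a custom propagator are illustrative only):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/contrib/propagators/autoprop"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register a custom propagator under a name that can then be referenced
	// from OTEL_PROPAGATORS (e.g. OTEL_PROPAGATORS=mycorp).
	autoprop.RegisterTextMapPropagator("mycorp", propagation.Baggage{})

	// TextMapPropagator composes propagators from registered names; unknown
	// names yield an error alongside whatever names could be resolved.
	p, err := autoprop.TextMapPropagator("tracecontext", "mycorp")
	fmt.Println(p.Fields(), err) // header names from both propagators, nil error
}
```
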
+const none = "none" + +// propagators is the registry of TextMapPropagators registered with this +// package. It includes all the OpenTelemetry defaults at startup. +var propagators = ®istry{ + names: map[string]propagation.TextMapPropagator{ + // W3C Trace Context. + "tracecontext": propagation.TraceContext{}, + // W3C Baggage. + "baggage": propagation.Baggage{}, + // B3 single-header format. + "b3": b3.New(), + // B3 multi-header format. + "b3multi": b3.New(b3.WithInjectEncoding(b3.B3MultipleHeader)), + // Jaeger. + "jaeger": jaeger.Jaeger{}, + // AWS X-Ray. + "xray": xray.Propagator{}, + // OpenTracing Trace. + "ottrace": ot.OT{}, + + // No-op TextMapPropagator. + none: propagation.NewCompositeTextMapPropagator(), + }, +} + +// registry maintains a map of propagator names to TextMapPropagator +// implementations that is safe for concurrent use by multiple goroutines +// without additional locking or coordination. +type registry struct { + mu sync.Mutex + names map[string]propagation.TextMapPropagator +} + +// load returns the value stored in the registry index for a key, or nil if no +// value is present. The ok result indicates whether value was found in the +// index. +func (r *registry) load(key string) (p propagation.TextMapPropagator, ok bool) { + r.mu.Lock() + p, ok = r.names[key] + r.mu.Unlock() + return p, ok +} + +var errDupReg = errors.New("duplicate registration") + +// store sets the value for a key if is not already in the registry. errDupReg +// is returned if the registry already contains key. +func (r *registry) store(key string, value propagation.TextMapPropagator) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.names == nil { + r.names = map[string]propagation.TextMapPropagator{key: value} + return nil + } + if _, ok := r.names[key]; ok { + return fmt.Errorf("%w: %q", errDupReg, key) + } + r.names[key] = value + return nil +} + +// drop removes key from the registry if it exists, otherwise nothing. +func (r *registry) drop(key string) { + r.mu.Lock() + delete(r.names, key) + r.mu.Unlock() +} + +// RegisterTextMapPropagator sets the TextMapPropagator p to be used when the +// OTEL_PROPAGATORS environment variable contains the propagator name. This +// will panic if name has already been registered or is a default +// (tracecontext, baggage, b3, b3multi, jaeger, xray, or ottrace). +func RegisterTextMapPropagator(name string, p propagation.TextMapPropagator) { + if err := propagators.store(name, p); err != nil { + // envRegistry.store will return errDupReg if name is already + // registered. Panic here so the user is made aware of the duplicate + // registration, which could be done by malicious code trying to + // intercept cross-cutting concerns. + // + // Panic for all other errors as well. At this point there should not + // be any other errors returned from the store operation. If there + // are, alert the developer that adding them as soon as possible that + // they need to be handled here. + panic(err) + } +} + +// TextMapPropagator returns a TextMapPropagator composed from the +// passed names of registered TextMapPropagators. Each name must match an +// already registered TextMapPropagator (see the RegisterTextMapPropagator +// function for more information) or a default (tracecontext, baggage, b3, +// b3multi, jaeger, xray, or ottrace). +// +// If "none" is included in the arguments, or no names are provided, the +// returned TextMapPropagator will be a no-operation implementation. +// +// An error is returned for any un-registered names. 
The remaining, known, +// names will be used to compose a TextMapPropagator that is returned with the +// error. +func TextMapPropagator(names ...string) (propagation.TextMapPropagator, error) { + var ( + props []propagation.TextMapPropagator + unknown []string + ) + + for _, name := range names { + if name == none { + // If "none" is passed in combination with any other propagator, + // the result still needs to be a no-op propagator. Therefore, + // short-circuit here. + return propagation.NewCompositeTextMapPropagator(), nil + } + + p, ok := propagators.load(name) + if !ok { + unknown = append(unknown, name) + continue + } + props = append(props, p) + } + + var err error + if len(unknown) > 0 { + joined := strings.Join(unknown, ",") + err = fmt.Errorf("%w: %s", errUnknownPropagator, joined) + } + + switch len(props) { + case 0: + return nil, err + case 1: + // Do not return a composite of a single propagator. + return props[0], err + default: + return propagation.NewCompositeTextMapPropagator(props...), err + } +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/LICENSE b/vendor/go.opentelemetry.io/contrib/propagators/b3/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_config.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_config.go new file mode 100644 index 00000000000..b09dbd3fb30 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_config.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package b3 // import "go.opentelemetry.io/contrib/propagators/b3" + +type config struct { + // InjectEncoding are the B3 encodings used when injecting trace + // information. If no encoding is specified (i.e. `B3Unspecified`) + // `B3SingleHeader` will be used as the default. + InjectEncoding Encoding +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. +func newConfig(opts ...Option) *config { + c := &config{} + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// Encoding is a bitmask representation of the B3 encoding type. +type Encoding uint8 + +// supports returns if e has o bit(s) set. +func (e Encoding) supports(o Encoding) bool { + return e&o == o +} + +const ( + // B3Unspecified is an unspecified B3 encoding. + B3Unspecified Encoding = 0 + // B3MultipleHeader is a B3 encoding that uses multiple headers to + // transmit tracing information all prefixed with `x-b3-`. + // x-b3-traceid: {TraceId} + // x-b3-parentspanid: {ParentSpanId} + // x-b3-spanid: {SpanId} + // x-b3-sampled: {SamplingState} + // x-b3-flags: {DebugFlag} + B3MultipleHeader Encoding = 1 << iota + // B3SingleHeader is a B3 encoding that uses a single header named `b3` + // to transmit tracing information. + // b3: {TraceId}-{SpanId}-{SamplingState}-{ParentSpanId} + B3SingleHeader +) + +// WithInjectEncoding sets the encoding the propagator will inject. +// The encoding is interpreted as a bitmask. Therefore +// WithInjectEncoding(B3SingleHeader | B3MultipleHeader) +// means the propagator will inject both single and multi B3 headers. +func WithInjectEncoding(encoding Encoding) Option { + return optionFunc(func(c *config) { + c.InjectEncoding = encoding + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go new file mode 100644 index 00000000000..17dfa0ba558 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go @@ -0,0 +1,341 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package b3 // import "go.opentelemetry.io/contrib/propagators/b3" + +import ( + "context" + "errors" + "strings" + + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + // Default B3 Header names. + b3ContextHeader = "b3" + b3DebugFlagHeader = "x-b3-flags" + b3TraceIDHeader = "x-b3-traceid" + b3SpanIDHeader = "x-b3-spanid" + b3SampledHeader = "x-b3-sampled" + b3ParentSpanIDHeader = "x-b3-parentspanid" + + b3TraceIDPadding = "0000000000000000" + + // B3 Single Header encoding widths. + separatorWidth = 1 // Single "-" character. + samplingWidth = 1 // Single hex character. + traceID64BitsWidth = 64 / 4 // 16 hex character Trace ID. + traceID128BitsWidth = 128 / 4 // 32 hex character Trace ID. 
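
The b3_config.go options above treat the inject encoding as a bitmask, so both B3 header formats can be enabled at once. A short usage sketch (the output comment follows the propagator's Fields() logic shown further down in b3_propagator.go):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/contrib/propagators/b3"
)

func main() {
	// WithInjectEncoding takes a bitmask, so single-header and multi-header
	// injection can be enabled together; single-header is the default when
	// the encoding is left unspecified.
	p := b3.New(b3.WithInjectEncoding(b3.B3SingleHeader | b3.B3MultipleHeader))
	fmt.Println(p.Fields()) // "b3" plus the "x-b3-*" header names
}
```
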
+ spanIDWidth = 16 // 16 hex character ID. + parentSpanIDWidth = 16 // 16 hex character ID. +) + +var ( + empty = trace.SpanContext{} + + errInvalidSampledByte = errors.New("invalid B3 Sampled found") + errInvalidSampledHeader = errors.New("invalid B3 Sampled header found") + errInvalidTraceIDHeader = errors.New("invalid B3 traceID header found") + errInvalidSpanIDHeader = errors.New("invalid B3 spanID header found") + errInvalidParentSpanIDHeader = errors.New("invalid B3 ParentSpanID header found") + errInvalidScope = errors.New("require either both traceID and spanID or none") + errInvalidScopeParent = errors.New("traceID and spanID required for ParentSpanID") + errInvalidScopeParentSingle = errors.New("traceID, spanID and Sampled required for ParentSpanID") + errEmptyContext = errors.New("empty request context") + errInvalidTraceIDValue = errors.New("invalid B3 traceID value found") + errInvalidSpanIDValue = errors.New("invalid B3 spanID value found") + errInvalidParentSpanIDValue = errors.New("invalid B3 ParentSpanID value found") +) + +type propagator struct { + cfg config +} + +var _ propagation.TextMapPropagator = propagator{} + +// New creates a B3 implementation of propagation.TextMapPropagator. +// B3 propagator serializes SpanContext to/from B3 Headers. +// This propagator supports both versions of B3 headers, +// 1. Single Header: +// b3: {TraceId}-{SpanId}-{SamplingState}-{ParentSpanId} +// 2. Multiple Headers: +// x-b3-traceid: {TraceId} +// x-b3-parentspanid: {ParentSpanId} +// x-b3-spanid: {SpanId} +// x-b3-sampled: {SamplingState} +// x-b3-flags: {DebugFlag} +// The Single Header propagator is used by default. +func New(opts ...Option) propagation.TextMapPropagator { + cfg := newConfig(opts...) + return propagator{ + cfg: *cfg, + } +} + +// Inject injects a context into the carrier as B3 headers. +// The parent span ID is omitted because it is not tracked in the +// SpanContext. +func (b3 propagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + sc := trace.SpanFromContext(ctx).SpanContext() + + if b3.cfg.InjectEncoding.supports(B3SingleHeader) || b3.cfg.InjectEncoding == B3Unspecified { + header := []string{} + if sc.TraceID().IsValid() && sc.SpanID().IsValid() { + header = append(header, sc.TraceID().String(), sc.SpanID().String()) + } + + if debugFromContext(ctx) { + header = append(header, "d") + } else if !(deferredFromContext(ctx)) { + if sc.IsSampled() { + header = append(header, "1") + } else { + header = append(header, "0") + } + } + + carrier.Set(b3ContextHeader, strings.Join(header, "-")) + } + + if b3.cfg.InjectEncoding.supports(B3MultipleHeader) { + if sc.TraceID().IsValid() && sc.SpanID().IsValid() { + carrier.Set(b3TraceIDHeader, sc.TraceID().String()) + carrier.Set(b3SpanIDHeader, sc.SpanID().String()) + } + + if debugFromContext(ctx) { + // Since Debug implies deferred, don't also send "X-B3-Sampled". + carrier.Set(b3DebugFlagHeader, "1") + } else if !(deferredFromContext(ctx)) { + if sc.IsSampled() { + carrier.Set(b3SampledHeader, "1") + } else { + carrier.Set(b3SampledHeader, "0") + } + } + } +} + +// Extract extracts a context from the carrier if it contains B3 headers. +func (b3 propagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + var ( + sc trace.SpanContext + err error + ) + + // Default to Single Header if a valid value exists. 
+ if h := carrier.Get(b3ContextHeader); h != "" { + ctx, sc, err = extractSingle(ctx, h) + if err == nil && sc.IsValid() { + return trace.ContextWithRemoteSpanContext(ctx, sc) + } + // The Single Header value was invalid, fallback to Multiple Header. + } + + var ( + traceID = carrier.Get(b3TraceIDHeader) + spanID = carrier.Get(b3SpanIDHeader) + parentSpanID = carrier.Get(b3ParentSpanIDHeader) + sampled = carrier.Get(b3SampledHeader) + debugFlag = carrier.Get(b3DebugFlagHeader) + ) + ctx, sc, err = extractMultiple(ctx, traceID, spanID, parentSpanID, sampled, debugFlag) + if err != nil || !sc.IsValid() { + // clear the deferred flag if we don't have a valid SpanContext + return withDeferred(ctx, false) + } + return trace.ContextWithRemoteSpanContext(ctx, sc) +} + +func (b3 propagator) Fields() []string { + header := []string{} + if b3.cfg.InjectEncoding.supports(B3SingleHeader) { + header = append(header, b3ContextHeader) + } + if b3.cfg.InjectEncoding.supports(B3MultipleHeader) || b3.cfg.InjectEncoding == B3Unspecified { + header = append(header, b3TraceIDHeader, b3SpanIDHeader, b3SampledHeader, b3DebugFlagHeader) + } + return header +} + +// extractMultiple reconstructs a SpanContext from header values based on B3 +// Multiple header. It is based on the implementation found here: +// https://github.com/openzipkin/zipkin-go/blob/v0.2.2/propagation/b3/spancontext.go +// and adapted to support a SpanContext. +func extractMultiple(ctx context.Context, traceID, spanID, parentSpanID, sampled, flags string) (context.Context, trace.SpanContext, error) { + var ( + err error + requiredCount int + scc = trace.SpanContextConfig{} + ) + + // correct values for an existing sampled header are "0" and "1". + // For legacy support and being lenient to other tracing implementations we + // allow "true" and "false" as inputs for interop purposes. + switch strings.ToLower(sampled) { + case "0", "false": + // Zero value for TraceFlags sample bit is unset. + case "1", "true": + scc.TraceFlags = trace.FlagsSampled + case "": + ctx = withDeferred(ctx, true) + default: + return ctx, empty, errInvalidSampledHeader + } + + // The only accepted value for Flags is "1". This will set Debug bitmask and + // sampled bitmask to 1 since debug implicitly means sampled. All other + // values and omission of header will be ignored. According to the spec. User + // shouldn't send X-B3-Sampled header along with X-B3-Flags header. Thus we will + // ignore X-B3-Sampled header when X-B3-Flags header is sent and valid. + if flags == "1" { + ctx = withDeferred(ctx, false) + ctx = withDebug(ctx, true) + scc.TraceFlags |= trace.FlagsSampled + } + + if traceID != "" { + requiredCount++ + id := traceID + if len(traceID) == 16 { + // Pad 64-bit trace IDs. + id = b3TraceIDPadding + traceID + } + if scc.TraceID, err = trace.TraceIDFromHex(id); err != nil { + return ctx, empty, errInvalidTraceIDHeader + } + } + + if spanID != "" { + requiredCount++ + if scc.SpanID, err = trace.SpanIDFromHex(spanID); err != nil { + return ctx, empty, errInvalidSpanIDHeader + } + } + + if requiredCount != 0 && requiredCount != 2 { + return ctx, empty, errInvalidScope + } + + if parentSpanID != "" { + if requiredCount == 0 { + return ctx, empty, errInvalidScopeParent + } + // Validate parent span ID but we do not use it so do not save it. 
+ if _, err = trace.SpanIDFromHex(parentSpanID); err != nil { + return ctx, empty, errInvalidParentSpanIDHeader + } + } + + return ctx, trace.NewSpanContext(scc), nil +} + +// extractSingle reconstructs a SpanContext from contextHeader based on a B3 +// Single header. It is based on the implementation found here: +// https://github.com/openzipkin/zipkin-go/blob/v0.2.2/propagation/b3/spancontext.go +// and adapted to support a SpanContext. +func extractSingle(ctx context.Context, contextHeader string) (context.Context, trace.SpanContext, error) { + if contextHeader == "" { + return ctx, empty, errEmptyContext + } + + var ( + scc = trace.SpanContextConfig{} + sampling string + ) + + headerLen := len(contextHeader) + + switch { + case headerLen == samplingWidth: + sampling = contextHeader + case headerLen == traceID64BitsWidth || headerLen == traceID128BitsWidth: + // Trace ID by itself is invalid. + return ctx, empty, errInvalidScope + case headerLen >= traceID64BitsWidth+spanIDWidth+separatorWidth: + pos := 0 + var traceID string + switch { + case string(contextHeader[traceID64BitsWidth]) == "-": + // traceID must be 64 bits + pos += traceID64BitsWidth // {traceID} + traceID = b3TraceIDPadding + string(contextHeader[0:pos]) + case string(contextHeader[32]) == "-": + // traceID must be 128 bits + pos += traceID128BitsWidth // {traceID} + traceID = string(contextHeader[0:pos]) + default: + return ctx, empty, errInvalidTraceIDValue + } + var err error + scc.TraceID, err = trace.TraceIDFromHex(traceID) + if err != nil { + return ctx, empty, errInvalidTraceIDValue + } + pos += separatorWidth // {traceID}- + + scc.SpanID, err = trace.SpanIDFromHex(contextHeader[pos : pos+spanIDWidth]) + if err != nil { + return ctx, empty, errInvalidSpanIDValue + } + pos += spanIDWidth // {traceID}-{spanID} + + if headerLen > pos { + if headerLen == pos+separatorWidth { + // {traceID}-{spanID}- is invalid. + return ctx, empty, errInvalidSampledByte + } + pos += separatorWidth // {traceID}-{spanID}- + + switch { + case headerLen == pos+samplingWidth: + sampling = string(contextHeader[pos]) + case headerLen == pos+parentSpanIDWidth: + // {traceID}-{spanID}-{parentSpanID} is invalid. + return ctx, empty, errInvalidScopeParentSingle + case headerLen == pos+samplingWidth+separatorWidth+parentSpanIDWidth: + sampling = string(contextHeader[pos]) + pos += samplingWidth + separatorWidth // {traceID}-{spanID}-{sampling}- + + // Validate parent span ID but we do not use it so do not + // save it. + _, err = trace.SpanIDFromHex(contextHeader[pos:]) + if err != nil { + return ctx, empty, errInvalidParentSpanIDValue + } + default: + return ctx, empty, errInvalidParentSpanIDValue + } + } + default: + return ctx, empty, errInvalidTraceIDValue + } + switch sampling { + case "": + ctx = withDeferred(ctx, true) + case "d": + ctx = withDebug(ctx, true) + scc.TraceFlags = trace.FlagsSampled + case "1": + scc.TraceFlags = trace.FlagsSampled + case "0": + // Zero value for TraceFlags sample bit is unset. 
+ default: + return ctx, empty, errInvalidSampledByte + } + + return ctx, trace.NewSpanContext(scc), nil +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/context.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/context.go new file mode 100644 index 00000000000..988cd2ca821 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/context.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package b3 // import "go.opentelemetry.io/contrib/propagators/b3" + +import "context" + +type b3KeyType int + +const ( + debugKey b3KeyType = iota + deferredKey +) + +// withDebug returns a copy of parent with debug set as the debug flag value . +func withDebug(parent context.Context, debug bool) context.Context { + return context.WithValue(parent, debugKey, debug) +} + +// debugFromContext returns the debug value stored in ctx. +// +// If no debug value is stored in ctx false is returned. +func debugFromContext(ctx context.Context) bool { + if ctx == nil { + return false + } + if debug, ok := ctx.Value(debugKey).(bool); ok { + return debug + } + return false +} + +// withDeferred returns a copy of parent with deferred set as the deferred flag value . +func withDeferred(parent context.Context, deferred bool) context.Context { + return context.WithValue(parent, deferredKey, deferred) +} + +// deferredFromContext returns the deferred value stored in ctx. +// +// If no deferred value is stored in ctx false is returned. +func deferredFromContext(ctx context.Context) bool { + if ctx == nil { + return false + } + if deferred, ok := ctx.Value(deferredKey).(bool); ok { + return deferred + } + return false +} diff --git a/vendor/google.golang.org/genproto/googleapis/storage/v2/doc.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/doc.go similarity index 70% rename from vendor/google.golang.org/genproto/googleapis/storage/v2/doc.go rename to vendor/go.opentelemetry.io/contrib/propagators/b3/doc.go index 92b8b7d6205..21578cfbe6e 100644 --- a/vendor/google.golang.org/genproto/googleapis/storage/v2/doc.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/doc.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Warning: This package is not ready for production use and does not receive -// regular updates. Usage of this code is not supported. This code may be -// removed at will. Do not depend on it. 
-package storage +// Package b3 implements the B3 propagator specification as defined at +// https://github.com/openzipkin/b3-propagation +package b3 // import "go.opentelemetry.io/contrib/propagators/b3" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go new file mode 100644 index 00000000000..47adc5d6502 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package b3 // import "go.opentelemetry.io/contrib/propagators/b3" + +// Version is the current release version of the B3 propagator. +func Version() string { + return "1.9.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/context.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/context.go new file mode 100644 index 00000000000..8a62451f746 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/context.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" + +import "context" + +type jaegerKeyType int + +const ( + debugKey jaegerKeyType = iota +) + +// withDebug returns a copy of parent with debug set as the debug flag value . +func withDebug(parent context.Context, debug bool) context.Context { + return context.WithValue(parent, debugKey, debug) +} + +// debugFromContext returns the debug value stored in ctx. +// +// If no debug value is stored in ctx false is returned. +func debugFromContext(ctx context.Context) bool { + if ctx == nil { + return false + } + if debug, ok := ctx.Value(debugKey).(bool); ok { + return debug + } + return false +} diff --git a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/doc.go similarity index 54% rename from vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go rename to vendor/go.opentelemetry.io/contrib/propagators/jaeger/doc.go index 1ae5c9186a3..1d3fb9f89f6 100644 --- a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/doc.go @@ -1,10 +1,10 @@ -// Copyright 2019 Google LLC +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,12 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package storage - -// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go" +// Package jaeger implements the Jaeger propagator specification as defined at +// https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go new file mode 100644 index 00000000000..3d206382373 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go @@ -0,0 +1,162 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
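As a usage sketch (not part of the vendored code), the Jaeger propagator defined in jaeger_propagator.go below can be installed as the process-wide propagator; otel.SetTextMapPropagator, propagation.NewCompositeTextMapPropagator, and propagation.TraceContext are existing OpenTelemetry APIs:

package main

import (
	"go.opentelemetry.io/contrib/propagators/jaeger"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register the Jaeger uber-trace-id propagator, composed with the W3C
	// traceparent propagator, as the global default used by instrumentation.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, jaeger.Jaeger{},
	))
}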
+ +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + jaegerHeader = "uber-trace-id" + separator = ":" + traceID64bitsWidth = 64 / 4 + traceID128bitsWidth = 128 / 4 + spanIDWidth = 64 / 4 + + traceIDPadding = "0000000000000000" + + flagsDebug = 0x02 + flagsSampled = 0x01 + flagsNotSampled = 0x00 + + deprecatedParentSpanID = "0" +) + +var ( + empty = trace.SpanContext{} + + errMalformedTraceContextVal = errors.New("header value of uber-trace-id should contain four different part separated by : ") + errInvalidTraceIDLength = errors.New("invalid trace id length, must be either 16 or 32") + errMalformedTraceID = errors.New("cannot decode trace id from header, should be a string of hex, lowercase trace id can't be all zero") + errInvalidSpanIDLength = errors.New("invalid span id length, must be 16") + errMalformedSpanID = errors.New("cannot decode span id from header, should be a string of hex, lowercase span id can't be all zero") + errMalformedFlag = errors.New("cannot decode flag") +) + +// Jaeger propagator serializes SpanContext to/from Jaeger Headers +// +// Jaeger format: +// +// uber-trace-id: {trace-id}:{span-id}:{parent-span-id}:{flags}. +type Jaeger struct{} + +var _ propagation.TextMapPropagator = &Jaeger{} + +// Inject injects a context to the carrier following jaeger format. +// The parent span ID is set to an dummy parent span id as the most implementations do. +func (jaeger Jaeger) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + sc := trace.SpanFromContext(ctx).SpanContext() + headers := []string{} + if !sc.TraceID().IsValid() || !sc.SpanID().IsValid() { + return + } + headers = append(headers, sc.TraceID().String(), sc.SpanID().String(), deprecatedParentSpanID) + if debugFromContext(ctx) { + headers = append(headers, fmt.Sprintf("%x", flagsDebug|flagsSampled)) + } else if sc.IsSampled() { + headers = append(headers, fmt.Sprintf("%x", flagsSampled)) + } else { + headers = append(headers, fmt.Sprintf("%x", flagsNotSampled)) + } + + carrier.Set(jaegerHeader, strings.Join(headers, separator)) +} + +// Extract extracts a context from the carrier if it contains Jaeger headers. 
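A hedged illustration of what Inject writes: using propagation.MapCarrier (an existing carrier type in go.opentelemetry.io/otel/propagation) and arbitrary example IDs, the header takes the {trace-id}:{span-id}:{parent-span-id}:{flags} form documented above, with the deprecated parent span ID fixed to 0:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/propagators/jaeger"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	traceID, _ := trace.TraceIDFromHex("00000000000000010000000000000002")
	spanID, _ := trace.SpanIDFromHex("0000000000000003")
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    traceID,
		SpanID:     spanID,
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	carrier := propagation.MapCarrier{}
	jaeger.Jaeger{}.Inject(ctx, carrier)

	// Prints: 00000000000000010000000000000002:0000000000000003:0:1
	fmt.Println(carrier.Get("uber-trace-id"))
}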
+func (jaeger Jaeger) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + // extract tracing information + if h := carrier.Get(jaegerHeader); h != "" { + ctx, sc, err := extract(ctx, h) + if err == nil && sc.IsValid() { + return trace.ContextWithRemoteSpanContext(ctx, sc) + } + } + + return ctx +} + +func extract(ctx context.Context, headerVal string) (context.Context, trace.SpanContext, error) { + var ( + scc = trace.SpanContextConfig{} + err error + ) + + parts := strings.Split(headerVal, separator) + if len(parts) != 4 { + return ctx, empty, errMalformedTraceContextVal + } + + // extract trace ID + if parts[0] != "" { + id := parts[0] + if len(id) != traceID128bitsWidth && len(id) != traceID64bitsWidth { + return ctx, empty, errInvalidTraceIDLength + } + // padding when length is 16 + if len(id) == traceID64bitsWidth { + id = traceIDPadding + id + } + scc.TraceID, err = trace.TraceIDFromHex(id) + if err != nil { + return ctx, empty, errMalformedTraceID + } + } + + // extract span ID + if parts[1] != "" { + id := parts[1] + if len(id) != spanIDWidth { + return ctx, empty, errInvalidSpanIDLength + } + scc.SpanID, err = trace.SpanIDFromHex(id) + if err != nil { + return ctx, empty, errMalformedSpanID + } + } + + // skip third part as it is deprecated + + // extract flag + if parts[3] != "" { + flagStr := parts[3] + flag, err := strconv.ParseInt(flagStr, 16, 64) + if err != nil { + return ctx, empty, errMalformedFlag + } + if flag&flagsSampled == flagsSampled { + // if sample bit is set, we check if debug bit is also set + if flag&flagsDebug == flagsDebug { + scc.TraceFlags |= trace.FlagsSampled + ctx = withDebug(ctx, true) + } else { + scc.TraceFlags |= trace.FlagsSampled + } + } + // ignore other bit, including firehose since we don't have corresponding flag in trace context. + } + return ctx, trace.NewSpanContext(scc), nil +} + +// Fields returns the Jaeger header key whose value is set with Inject. +func (jaeger Jaeger) Fields() []string { + return []string{jaegerHeader} +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go new file mode 100644 index 00000000000..ddde70a0ab9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" + +// Version is the current release version of the Jaeger propagator. +func Version() string { + return "1.9.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. 
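And the reverse direction: Extract left-pads a 16-hex-digit (64-bit) trace ID to 128 bits before parsing, as the padding branch above shows. A small sketch reading the result back via trace.SpanContextFromContext (an existing otel trace API); the IDs are arbitrary example values:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/propagators/jaeger"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	carrier := propagation.MapCarrier{
		// 64-bit trace id, span id, deprecated parent span id, sampled flag.
		"uber-trace-id": "00000000000000ab:00000000000000cd:0:1",
	}
	ctx := jaeger.Jaeger{}.Extract(context.Background(), carrier)

	sc := trace.SpanContextFromContext(ctx)
	fmt.Println(sc.TraceID())   // 000000000000000000000000000000ab (left-padded)
	fmt.Println(sc.SpanID())    // 00000000000000cd
	fmt.Println(sc.IsSampled()) // true
}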
+func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/ot/doc.go b/vendor/go.opentelemetry.io/contrib/propagators/ot/doc.go index e3a537e78e0..9e524637c15 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/ot/doc.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/ot/doc.go @@ -12,5 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This package implements the ot-tracer-* propagator used by Basic Tracer implementation from the OpenTracing project +// Package ot implements the ot-tracer-* propagator used by the default Tracer +// implementation from the OpenTracing project. package ot // import "go.opentelemetry.io/contrib/propagators/ot" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/ot/ot_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/ot/ot_propagator.go index facd6331bcd..88366f3c260 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/ot/ot_propagator.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/ot/ot_propagator.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package ot +package ot // import "go.opentelemetry.io/contrib/propagators/ot" import ( "context" @@ -20,17 +20,19 @@ import ( "fmt" "strings" - "go.opentelemetry.io/otel/baggage" + "go.uber.org/multierr" + "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) const ( // Default OT Header names. - traceIDHeader = "ot-tracer-traceid" - spanIDHeader = "ot-tracer-spanid" - sampledHeader = "ot-tracer-sampled" + traceIDHeader = "ot-tracer-traceid" + spanIDHeader = "ot-tracer-spanid" + sampledHeader = "ot-tracer-sampled" + baggageHeaderPrefix = "ot-baggage-" otTraceIDPadding = "0000000000000000" @@ -53,7 +55,7 @@ type OT struct { var _ propagation.TextMapPropagator = OT{} // Inject injects a context into the carrier as OT headers. -// NOTE: In order to interop with systems that use the OT header format, trace ids MUST be 64-bits +// NOTE: In order to interop with systems that use the OT header format, trace ids MUST be 64-bits. func (o OT) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { sc := trace.SpanFromContext(ctx).SpanContext() @@ -72,9 +74,8 @@ func (o OT) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { } for _, m := range baggage.FromContext(ctx).Members() { - carrier.Set(fmt.Sprintf("ot-baggage-%s", m.Key()), m.Value()) + carrier.Set(fmt.Sprintf("%s%s", baggageHeaderPrefix, m.Key()), m.Value()) } - } // Extract extracts a context from the carrier if it contains OT headers. @@ -93,18 +94,44 @@ func (o OT) Extract(ctx context.Context, carrier propagation.TextMapCarrier) con if err != nil || !sc.IsValid() { return ctx } - // TODO: implement extracting baggage - // - // this currently is not achievable without an implementation of `keys` - // on the carrier, see: - // https://github.com/open-telemetry/opentelemetry-go/issues/1493 + + bags, err := extractBags(carrier) + if err != nil { + return trace.ContextWithRemoteSpanContext(ctx, sc) + } + ctx = baggage.ContextWithBaggage(ctx, bags) return trace.ContextWithRemoteSpanContext(ctx, sc) } +// Fields returns the OT header keys whose values are set with Inject. func (o OT) Fields() []string { return []string{traceIDHeader, spanIDHeader, sampledHeader} } +// extractBags extracts OpenTracing baggage information from carrier. 
+func extractBags(carrier propagation.TextMapCarrier) (baggage.Baggage, error) { + var err error + var members []baggage.Member + for _, key := range carrier.Keys() { + lowerKey := strings.ToLower(key) + if !strings.HasPrefix(lowerKey, baggageHeaderPrefix) { + continue + } + strippedKey := strings.TrimPrefix(lowerKey, baggageHeaderPrefix) + member, e := baggage.NewMember(strippedKey, carrier.Get(key)) + if e != nil { + err = multierr.Append(err, e) + continue + } + members = append(members, member) + } + bags, e := baggage.New(members...) + if err != nil { + return bags, multierr.Append(err, e) + } + return bags, err +} + // extract reconstructs a SpanContext from header values based on OT // headers. func extract(traceID, spanID, sampled string) (trace.SpanContext, error) { diff --git a/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go b/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go index 2b45f950914..269db91a0e3 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package ot +package ot // import "go.opentelemetry.io/contrib/propagators/ot" // Version is the current release version of the ot propagator. func Version() string { - return "1.4.0" + return "1.9.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go index 2fafbcd82bf..b29f618e3de 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go @@ -16,7 +16,6 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ import ( "crypto/tls" - "io/ioutil" "net/url" "os" "path" @@ -29,7 +28,7 @@ import ( // DefaultEnvOptionsReader is the default environments reader. 
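A round-trip sketch of the new baggage support: Inject writes each baggage member under the ot-baggage- prefix, and the extractBags path above rebuilds OpenTelemetry baggage from those headers. This assumes the propagator only injects once a valid span context is present, so one is constructed first; the IDs and the userid key are arbitrary example values:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/propagators/ot"
	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	traceID, _ := trace.TraceIDFromHex("000000000000000000000000000000aa")
	spanID, _ := trace.SpanIDFromHex("00000000000000bb")
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	member, _ := baggage.NewMember("userid", "42")
	bag, _ := baggage.New(member)
	ctx = baggage.ContextWithBaggage(ctx, bag)

	carrier := propagation.MapCarrier{}
	ot.OT{}.Inject(ctx, carrier)
	fmt.Println(carrier.Get("ot-baggage-userid")) // 42

	// Extract rebuilds the baggage from the ot-baggage-* headers.
	out := ot.OT{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(out).Member("userid").Value()) // 42
}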
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, - ReadFile: ioutil.ReadFile, + ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 6be3fec8626..9d6e1898b14 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -26,6 +26,8 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" @@ -196,9 +198,16 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc defer cancel() return c.requestFunc(ctx, func(iCtx context.Context) error { - _, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ + resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, }) + if resp != nil && resp.PartialSuccess != nil { + otel.Handle(internal.PartialSuccessToError( + internal.TracingPartialSuccess, + resp.PartialSuccess.RejectedSpans, + resp.PartialSuccess.ErrorMessage, + )) + } // nil is converted to OK. if status.Code(err) == codes.OK { // Success. diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go index aed8b6660a5..7cce2bd15f4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go @@ -214,7 +214,7 @@ func (i *aiGauge) unwrap() instrument.Asynchronous { return nil } -//Sync Instruments. +// Sync Instruments. type sfCounter struct { name string opts []instrument.Option diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 00000000000..6e923acab43 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. 
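The client.go hunk above now reports OTLP partial-success responses (rejected span count plus the server's message) through the global error handler instead of silently discarding them. A sketch of how an application could surface those notices, using the existing otel.SetErrorHandler and otel.ErrorHandlerFunc APIs; logging is just an example destination:

package main

import (
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Partial-success notices from the OTLP trace exporter are delivered via
	// otel.Handle, so a custom handler makes them visible.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("opentelemetry: %v", err)
	}))
}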
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index 246873345de..39f025a1715 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -12,13 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package instrumentation provides an instrumentation library structure to be -passed to both the OpenTelemetry Tracer and Meter components. - -For more information see -[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md). -*/ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 775de40e30c..09c6d93f6d0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -12,13 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package instrumentation provides an instrumentation scope structure to be -passed to both the OpenTelemetry Tracer and Meter components. - -For more information see -[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md). -*/ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Scope represents the instrumentation scope. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index eac69f34d76..292ea5481bc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -90,10 +90,10 @@ var _ trace.TracerProvider = &TracerProvider{} // NewTracerProvider returns a new and configured TracerProvider. // // By default the returned TracerProvider is configured with: -// - a ParentBased(AlwaysSample) Sampler -// - a random number IDGenerator -// - the resource.Default() Resource -// - the default SpanLimits. +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. // // The passed opts are used to override these default values and configure the // returned TracerProvider appropriately. @@ -162,16 +162,16 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() - new := spanProcessorStates{} + newSPS := spanProcessorStates{} if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok { - new = append(new, old...) + newSPS = append(newSPS, old...) } newSpanSync := &spanProcessorState{ sp: s, state: &sync.Once{}, } - new = append(new, newSpanSync) - p.spanProcessors.Store(new) + newSPS = append(newSPS, newSpanSync) + p.spanProcessors.Store(newSPS) } // UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. 
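For reference, a short sketch (not from the diff) of overriding the documented TracerProvider defaults at construction time; WithSampler, ParentBased, and TraceIDRatioBased are existing sdktrace options, and anything not set keeps the defaults listed above:

package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Replace the default ParentBased(AlwaysSample) sampler; the random
	// IDGenerator, resource.Default(), and default SpanLimits stay in effect.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
	)

	_, span := tp.Tracer("example").Start(context.Background(), "operation")
	span.End()

	_ = tp.Shutdown(context.Background())
}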
@@ -241,10 +241,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { if !ok { return fmt.Errorf("failed to load span processors") } - if len(spss) == 0 { - return nil - } - + var retErr error for _, sps := range spss { select { case <-ctx.Done(): @@ -257,10 +254,15 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { err = sps.sp.Shutdown(ctx) }) if err != nil { - return err + if retErr == nil { + retErr = err + } else { + // Poor man's list of errors + retErr = fmt.Errorf("%v; %v", retErr, err) + } } } - return nil + return retErr } // TracerProviderOption configures a TracerProvider. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index a39d0341e4a..a6dcf4b307c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -102,6 +102,7 @@ func (ts traceIDRatioSampler) Description() string { // always sample. Fractions < 0 are treated as zero. To respect the // parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used // as a delegate of a `Parent` sampler. +// //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { if fraction >= 1 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 14d0aabfe69..449cf6c2552 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,8 +20,10 @@ import ( "reflect" "runtime" rt "runtime/trace" + "strings" "sync" "time" + "unicode/utf8" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -294,7 +296,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // truncateAttr returns a truncated version of attr. Only string and string // slice attribute values are truncated. String values are truncated to at -// most a length of limit. Each string slice value is truncated in this fasion +// most a length of limit. Each string slice value is truncated in this fashion // (the slice length itself is unaffected). // // No truncation is perfromed for a negative limit. @@ -305,7 +307,7 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { switch attr.Value.Type() { case attribute.STRING: if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(v[:limit]) + return attr.Key.String(safeTruncate(v, limit)) } case attribute.STRINGSLICE: // Do no mutate the original, make a copy. @@ -324,7 +326,7 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { v := trucated.Value.AsStringSlice() for i := range v { if len(v[i]) > limit { - v[i] = v[i][:limit] + v[i] = safeTruncate(v[i], limit) } } return trucated @@ -332,6 +334,34 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { return attr } +// safeTruncate truncates the string and guarantees valid UTF-8 is returned. +func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. 
If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index f4a1f96f3d6..7b11fc465c6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -37,6 +37,11 @@ var _ trace.Tracer = &tracer{} func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) + if ctx == nil { + // Prevent trace.ContextWithSpan from panicking. + ctx = context.Background() + } + // For local spans created by this SDK, track child span count. if p := trace.SpanFromContext(ctx); p != nil { if sdkSpan, ok := p.(*recordingSpan); ok { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go new file mode 100644 index 00000000000..104489e79fd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracetest is a testing helper package for the SDK. User can +// configure no-op or in-memory exporters to verify different SDK behaviors or +// custom instrumentation. +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/sdk/trace" +) + +var _ trace.SpanExporter = (*NoopExporter)(nil) + +// NewNoopExporter returns a new no-op exporter. +func NewNoopExporter() *NoopExporter { + return new(NoopExporter) +} + +// NoopExporter is an exporter that drops all received spans and performs no +// action. +type NoopExporter struct{} + +// ExportSpans handles export of spans by dropping them. +func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } + +// Shutdown stops the exporter by doing nothing. +func (nsb *NoopExporter) Shutdown(context.Context) error { return nil } + +var _ trace.SpanExporter = (*InMemoryExporter)(nil) + +// NewInMemoryExporter returns a new InMemoryExporter. +func NewInMemoryExporter() *InMemoryExporter { + return new(InMemoryExporter) +} + +// InMemoryExporter is an exporter that stores all received spans in-memory. +type InMemoryExporter struct { + mu sync.Mutex + ss SpanStubs +} + +// ExportSpans handles export of spans by storing them in memory. 
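The safeTruncate change above exists so attribute limits never cut a multi-byte UTF-8 character in half. An illustrative standalone helper showing the same rune-boundary idea (this is not the SDK's unexported function, just a sketch of the technique):

package main

import (
	"fmt"
	"unicode/utf8"
)

// truncateUTF8 cuts s to at most limit bytes without splitting a rune.
func truncateUTF8(s string, limit int) string {
	cnt := 0
	for cnt < len(s) {
		_, size := utf8.DecodeRuneInString(s[cnt:])
		if cnt+size > limit {
			break
		}
		cnt += size
	}
	return s[:cnt]
}

func main() {
	// "héllo" is 6 bytes; a naive s[:2] would split the 2-byte 'é'.
	fmt.Println(truncateUTF8("héllo", 2)) // "h"
	fmt.Println(truncateUTF8("héllo", 3)) // "hé"
}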
+func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error { + imsb.mu.Lock() + defer imsb.mu.Unlock() + imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...) + return nil +} + +// Shutdown stops the exporter by clearing spans held in memory. +func (imsb *InMemoryExporter) Shutdown(context.Context) error { + imsb.Reset() + return nil +} + +// Reset the current in-memory storage. +func (imsb *InMemoryExporter) Reset() { + imsb.mu.Lock() + defer imsb.mu.Unlock() + imsb.ss = nil +} + +// GetSpans returns the current in-memory stored spans. +func (imsb *InMemoryExporter) GetSpans() SpanStubs { + imsb.mu.Lock() + defer imsb.mu.Unlock() + ret := make(SpanStubs, len(imsb.ss)) + copy(ret, imsb.ss) + return ret +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go new file mode 100644 index 00000000000..06673a1c049 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "context" + "sync" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// SpanRecorder records started and ended spans. +type SpanRecorder struct { + startedMu sync.RWMutex + started []sdktrace.ReadWriteSpan + + endedMu sync.RWMutex + ended []sdktrace.ReadOnlySpan +} + +var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil) + +// NewSpanRecorder returns a new initialized SpanRecorder. +func NewSpanRecorder() *SpanRecorder { + return new(SpanRecorder) +} + +// OnStart records started spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) { + sr.startedMu.Lock() + defer sr.startedMu.Unlock() + sr.started = append(sr.started, s) +} + +// OnEnd records completed spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { + sr.endedMu.Lock() + defer sr.endedMu.Unlock() + sr.ended = append(sr.ended, s) +} + +// Shutdown does nothing. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Shutdown(context.Context) error { + return nil +} + +// ForceFlush does nothing. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) ForceFlush(context.Context) error { + return nil +} + +// Started returns a copy of all started spans that have been recorded. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { + sr.startedMu.RLock() + defer sr.startedMu.RUnlock() + dst := make([]sdktrace.ReadWriteSpan, len(sr.started)) + copy(dst, sr.started) + return dst +} + +// Ended returns a copy of all ended spans that have been recorded. +// +// This method is safe to be called concurrently. 
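A sketch of using the in-memory exporter added above in a test: wiring it to a TracerProvider via sdktrace.WithSyncer (an existing option that exports synchronously on span end) makes the recorded stubs available for assertions:

package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	exp := tracetest.NewInMemoryExporter()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exp))

	_, span := tp.Tracer("test").Start(context.Background(), "operation")
	span.End()

	spans := exp.GetSpans()
	fmt.Println(len(spans), spans[0].Name) // 1 operation
}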
+func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan { + sr.endedMu.RLock() + defer sr.endedMu.RUnlock() + dst := make([]sdktrace.ReadOnlySpan, len(sr.ended)) + copy(dst, sr.ended) + return dst +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go new file mode 100644 index 00000000000..bfe73de9c41 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +// SpanStubs is a slice of SpanStub use for testing an SDK. +type SpanStubs []SpanStub + +// SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. +func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs { + if len(ro) == 0 { + return nil + } + + s := make(SpanStubs, 0, len(ro)) + for _, r := range ro { + s = append(s, SpanStubFromReadOnlySpan(r)) + } + + return s +} + +// Snapshots returns s as a slice of ReadOnlySpans. +func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { + if len(s) == 0 { + return nil + } + + ro := make([]tracesdk.ReadOnlySpan, len(s)) + for i := 0; i < len(s); i++ { + ro[i] = s[i].Snapshot() + } + return ro +} + +// SpanStub is a stand-in for a Span. +type SpanStub struct { + Name string + SpanContext trace.SpanContext + Parent trace.SpanContext + SpanKind trace.SpanKind + StartTime time.Time + EndTime time.Time + Attributes []attribute.KeyValue + Events []tracesdk.Event + Links []tracesdk.Link + Status tracesdk.Status + DroppedAttributes int + DroppedEvents int + DroppedLinks int + ChildSpanCount int + Resource *resource.Resource + InstrumentationLibrary instrumentation.Library +} + +// SpanStubFromReadOnlySpan returns a SpanStub populated from ro. +func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { + if ro == nil { + return SpanStub{} + } + + return SpanStub{ + Name: ro.Name(), + SpanContext: ro.SpanContext(), + Parent: ro.Parent(), + SpanKind: ro.SpanKind(), + StartTime: ro.StartTime(), + EndTime: ro.EndTime(), + Attributes: ro.Attributes(), + Events: ro.Events(), + Links: ro.Links(), + Status: ro.Status(), + DroppedAttributes: ro.DroppedAttributes(), + DroppedEvents: ro.DroppedEvents(), + DroppedLinks: ro.DroppedLinks(), + ChildSpanCount: ro.ChildSpanCount(), + Resource: ro.Resource(), + InstrumentationLibrary: ro.InstrumentationScope(), + } +} + +// Snapshot returns a read-only copy of the SpanStub. 
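Similarly, a sketch of using the SpanRecorder above as a span processor; WithSpanProcessor is an existing sdktrace option, and Started/Ended expose the recorded spans at the two lifecycle points:

package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))

	_, span := tp.Tracer("test").Start(context.Background(), "operation")
	span.End()

	// Started sees the span as soon as Start returns; Ended only after End.
	fmt.Println(len(sr.Started()), len(sr.Ended())) // 1 1
}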
+func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { + return spanSnapshot{ + name: s.Name, + spanContext: s.SpanContext, + parent: s.Parent, + spanKind: s.SpanKind, + startTime: s.StartTime, + endTime: s.EndTime, + attributes: s.Attributes, + events: s.Events, + links: s.Links, + status: s.Status, + droppedAttributes: s.DroppedAttributes, + droppedEvents: s.DroppedEvents, + droppedLinks: s.DroppedLinks, + childSpanCount: s.ChildSpanCount, + resource: s.Resource, + instrumentationScope: s.InstrumentationLibrary, + } +} + +type spanSnapshot struct { + // Embed the interface to implement the private method. + tracesdk.ReadOnlySpan + + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []tracesdk.Event + links []tracesdk.Link + status tracesdk.Status + droppedAttributes int + droppedEvents int + droppedLinks int + childSpanCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope +} + +func (s spanSnapshot) Name() string { return s.name } +func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext } +func (s spanSnapshot) Parent() trace.SpanContext { return s.parent } +func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind } +func (s spanSnapshot) StartTime() time.Time { return s.startTime } +func (s spanSnapshot) EndTime() time.Time { return s.endTime } +func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes } +func (s spanSnapshot) Links() []tracesdk.Link { return s.links } +func (s spanSnapshot) Events() []tracesdk.Event { return s.events } +func (s spanSnapshot) Status() tracesdk.Status { return s.status } +func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes } +func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks } +func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } +func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } +func (s spanSnapshot) Resource() *resource.Resource { return s.resource } +func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} +func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationScope +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go deleted file mode 100644 index 71ae5b57624..00000000000 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.17.3 -// source: opentelemetry/proto/trace/v1/trace_config.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -// Enum value maps for ConstantSampler_ConstantDecision. -var ( - ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", - } - ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, - } -) - -func (x ConstantSampler_ConstantDecision) Enum() *ConstantSampler_ConstantDecision { - p := new(ConstantSampler_ConstantDecision) - *p = x - return p -} - -func (x ConstantSampler_ConstantDecision) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ConstantSampler_ConstantDecision) Descriptor() protoreflect.EnumDescriptor { - return file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0].Descriptor() -} - -func (ConstantSampler_ConstantDecision) Type() protoreflect.EnumType { - return &file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0] -} - -func (x ConstantSampler_ConstantDecision) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ConstantSampler_ConstantDecision.Descriptor instead. -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The global default sampler used to make decisions on span sampling. - // - // Types that are assignable to Sampler: - // *TraceConfig_ConstantSampler - // *TraceConfig_TraceIdRatioBased - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` - // The global default max number of attributes per timed event. 
- MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` - // The global default max number of link entries per span. - MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - // The global default max number of attributes per span. - MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` -} - -func (x *TraceConfig) Reset() { - *x = TraceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceConfig) ProtoMessage() {} - -func (x *TraceConfig) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceConfig.ProtoReflect.Descriptor instead. -func (*TraceConfig) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{0} -} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (x *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := x.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (x *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { - if x, ok := x.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { - return x.TraceIdRatioBased - } - return nil -} - -func (x *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := x.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (x *TraceConfig) GetMaxNumberOfAttributes() int64 { - if x != nil { - return x.MaxNumberOfAttributes - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfTimedEvents() int64 { - if x != nil { - return x.MaxNumberOfTimedEvents - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { - if x != nil { - return x.MaxNumberOfAttributesPerTimedEvent - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfLinks() int64 { - if x != nil { - return x.MaxNumberOfLinks - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { - if x != nil { - return x.MaxNumberOfAttributesPerLink - } - return 0 -} - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` -} - -type TraceConfig_TraceIdRatioBased struct { - TraceIdRatioBased *TraceIdRatioBased `protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof"` -} - -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` -} - 
-func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} - -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -// Sampler that always makes a constant decision on span sampling. -type ConstantSampler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` -} - -func (x *ConstantSampler) Reset() { - *x = ConstantSampler{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConstantSampler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConstantSampler) ProtoMessage() {} - -func (x *ConstantSampler) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConstantSampler.ProtoReflect.Descriptor instead. -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1} -} - -func (x *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if x != nil { - return x.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to uniformly sample traces with a given ratio. -// The ratio of sampling a trace is equal to that of the specified ratio. -type TraceIdRatioBased struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The desired ratio of sampling. Must be within [0.0, 1.0]. - SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` -} - -func (x *TraceIdRatioBased) Reset() { - *x = TraceIdRatioBased{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceIdRatioBased) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceIdRatioBased) ProtoMessage() {} - -func (x *TraceIdRatioBased) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceIdRatioBased.ProtoReflect.Descriptor instead. -func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2} -} - -func (x *TraceIdRatioBased) GetSamplingRatio() float64 { - if x != nil { - return x.SamplingRatio - } - return 0 -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Rate per second. 
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` -} - -func (x *RateLimitingSampler) Reset() { - *x = RateLimitingSampler{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RateLimitingSampler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RateLimitingSampler) ProtoMessage() {} - -func (x *RateLimitingSampler) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RateLimitingSampler.ProtoReflect.Descriptor instead. -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3} -} - -func (x *RateLimitingSampler) GetQps() int64 { - if x != nil { - return x.Qps - } - return 0 -} - -var File_opentelemetry_proto_trace_v1_trace_config_proto protoreflect.FileDescriptor - -var file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, - 0x84, 0x05, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x5a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x14, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x62, 0x61, - 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, - 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x00, 0x52, 0x11, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, - 0x67, 0x0a, 0x15, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 
0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x13, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, - 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x4f, 0x66, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x54, 0x0a, - 0x28, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x22, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4c, 0x69, 0x6e, - 0x6b, 0x73, 0x12, 0x47, 0x0a, 0x21, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, - 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1c, 0x6d, - 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x64, 0x65, - 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x65, - 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, - 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x46, 0x46, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, - 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, - 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x22, 0x39, 0x0a, 0x11, - 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, - 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x61, 
0x74, - 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, - 0x6e, 0x67, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, - 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, - 0x42, 0x68, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce sync.Once - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc -) - -func file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP() []byte { - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce.Do(func() { - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData) - }) - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData -} - -var file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = []interface{}{ - (ConstantSampler_ConstantDecision)(0), // 0: opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision - (*TraceConfig)(nil), // 1: opentelemetry.proto.trace.v1.TraceConfig - (*ConstantSampler)(nil), // 2: opentelemetry.proto.trace.v1.ConstantSampler - (*TraceIdRatioBased)(nil), // 3: opentelemetry.proto.trace.v1.TraceIdRatioBased - (*RateLimitingSampler)(nil), // 4: opentelemetry.proto.trace.v1.RateLimitingSampler -} -var file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = []int32{ - 2, // 0: opentelemetry.proto.trace.v1.TraceConfig.constant_sampler:type_name -> opentelemetry.proto.trace.v1.ConstantSampler - 3, // 1: opentelemetry.proto.trace.v1.TraceConfig.trace_id_ratio_based:type_name -> opentelemetry.proto.trace.v1.TraceIdRatioBased - 4, // 2: opentelemetry.proto.trace.v1.TraceConfig.rate_limiting_sampler:type_name -> opentelemetry.proto.trace.v1.RateLimitingSampler - 0, // 3: opentelemetry.proto.trace.v1.ConstantSampler.decision:type_name -> opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_opentelemetry_proto_trace_v1_trace_config_proto_init() } -func file_opentelemetry_proto_trace_v1_trace_config_proto_init() { - if File_opentelemetry_proto_trace_v1_trace_config_proto != nil { - return - } - if 
!protoimpl.UnsafeEnabled { - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConstantSampler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceIdRatioBased); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitingSampler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_TraceIdRatioBased)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes, - DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs, - EnumInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes, - MessageInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes, - }.Build() - File_opentelemetry_proto_trace_v1_trace_config_proto = out.File - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = nil - file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = nil - file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = nil -} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go index a5e166b1fa3..fc285c089e7 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -91,6 +91,23 @@ type ExportTraceServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. 
+ // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"` } func (x *ExportTraceServiceResponse) Reset() { @@ -125,6 +142,79 @@ func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1} } +func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess { + if x != nil { + return x.PartialSuccess + } + return nil +} + +type ExportTracePartialSuccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of rejected spans. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ExportTracePartialSuccess) Reset() { + *x = ExportTracePartialSuccess{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTracePartialSuccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTracePartialSuccess) ProtoMessage() {} + +func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead. 
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 { + if x != nil { + return x.RejectedSpans + } + return 0 +} + +func (x *ExportTracePartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ @@ -144,26 +234,42 @@ var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = [] 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, - 0x1c, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa2, 0x01, - 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, - 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x42, 0x73, 0x0a, 0x29, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, - 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 
0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, + 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -178,21 +284,23 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData } -var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest 
(*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse - (*v1.ResourceSpans)(nil), // 2: opentelemetry.proto.trace.v1.ResourceSpans + (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans } var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{ - 2, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans - 0, // 1: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest - 1, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } @@ -225,6 +333,18 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { return nil } } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTracePartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -232,7 +352,7 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index e7ccdc0eef3..8502e607b25 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -353,69 +353,6 @@ func (x *KeyValue) GetValue() *AnyValue { return nil } -// InstrumentationLibrary is a message representing the instrumentation library information -// such as the fully qualified name and version. 
-// InstrumentationLibrary is wire-compatible with InstrumentationScope for binary -// Protobuf format. -// This message is deprecated and will be removed on June 15, 2022. -// -// Deprecated: Do not use. -type InstrumentationLibrary struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An empty instrumentation library name means the name is unknown. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *InstrumentationLibrary) Reset() { - *x = InstrumentationLibrary{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InstrumentationLibrary) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InstrumentationLibrary) ProtoMessage() {} - -func (x *InstrumentationLibrary) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InstrumentationLibrary.ProtoReflect.Descriptor instead. -func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} -} - -func (x *InstrumentationLibrary) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *InstrumentationLibrary) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - // InstrumentationScope is a message representing the instrumentation scope information // such as the fully qualified name and version. type InstrumentationScope struct { @@ -424,14 +361,16 @@ type InstrumentationScope struct { unknownFields protoimpl.UnknownFields // An empty instrumentation scope name means the name is unknown. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } func (x *InstrumentationScope) Reset() { *x = InstrumentationScope{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -444,7 +383,7 @@ func (x *InstrumentationScope) String() string { func (*InstrumentationScope) ProtoMessage() {} func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -457,7 +396,7 @@ func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { // Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead. func (*InstrumentationScope) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} } func (x *InstrumentationScope) GetName() string { @@ -474,6 +413,20 @@ func (x *InstrumentationScope) GetVersion() string { return "" } +func (x *InstrumentationScope) GetAttributes() []*KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ @@ -519,23 +472,28 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4a, 0x0a, 0x16, 0x49, - 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x44, 0x0a, 0x14, 0x49, 0x6e, 0x73, 0x74, 0x72, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 
0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x5b, 0x0a, - 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -550,14 +508,13 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ - (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue - (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue - (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList - (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue - 
(*InstrumentationLibrary)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationLibrary - (*InstrumentationScope)(nil), // 5: opentelemetry.proto.common.v1.InstrumentationScope + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ -565,11 +522,12 @@ var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_opentelemetry_proto_common_v1_common_proto_init() } @@ -627,18 +585,6 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrary); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InstrumentationScope); i { case 0: return &v.state @@ -666,7 +612,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 6, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index 43810938251..bcc1060e3dd 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -115,14 +115,16 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x61, 0x0a, - 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 
0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index fcd6beb5ab5..499a43d77bb 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -107,7 +107,7 @@ func (x Span_SpanKind) Number() protoreflect.EnumNumber { // Deprecated: Use Span_SpanKind.Descriptor instead. func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} } // For the semantics of status codes see @@ -162,7 +162,7 @@ func (x Status_StatusCode) Number() protoreflect.EnumNumber { // Deprecated: Use Status_StatusCode.Descriptor instead. func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{5, 0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} } // TracesData represents the traces data that can be stored in a persistent storage, @@ -238,36 +238,6 @@ type ResourceSpans struct { Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` - // A list of InstrumentationLibrarySpans that originate from a resource. - // This field is deprecated and will be removed after grace period expires on June 15, 2022. - // - // During the grace period the following rules SHOULD be followed: - // - // For Binary Protobufs - // ==================== - // Binary Protobuf senders SHOULD NOT set instrumentation_library_spans. Instead - // scope_spans SHOULD be set. 
- // - // Binary Protobuf receivers SHOULD check if instrumentation_library_spans is set - // and scope_spans is not set then the value in instrumentation_library_spans - // SHOULD be used instead by converting InstrumentationLibrarySpans into ScopeSpans. - // If scope_spans is set then instrumentation_library_spans SHOULD be ignored. - // - // For JSON - // ======== - // JSON senders that set instrumentation_library_spans field MAY also set - // scope_spans to carry the same spans, essentially double-publishing the same data. - // Such double-publishing MAY be controlled by a user-settable option. - // If double-publishing is not used then the senders SHOULD set scope_spans and - // SHOULD NOT set instrumentation_library_spans. - // - // JSON receivers SHOULD check if instrumentation_library_spans is set and - // scope_spans is not set then the value in instrumentation_library_spans - // SHOULD be used instead by converting InstrumentationLibrarySpans into ScopeSpans. - // If scope_spans is set then instrumentation_library_spans field SHOULD be ignored. - // - // Deprecated: Do not use. - InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,1000,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -319,14 +289,6 @@ func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans { return nil } -// Deprecated: Do not use. -func (x *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { - if x != nil { - return x.InstrumentationLibrarySpans - } - return nil -} - func (x *ResourceSpans) GetSchemaUrl() string { if x != nil { return x.SchemaUrl @@ -403,87 +365,7 @@ func (x *ScopeSpans) GetSchemaUrl() string { return "" } -// A collection of Spans produced by an InstrumentationLibrary. -// InstrumentationLibrarySpans is wire-compatible with ScopeSpans for binary -// Protobuf format. -// This message is deprecated and will be removed on June 15, 2022. -// -// Deprecated: Do not use. -type InstrumentationLibrarySpans struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The instrumentation library information for the spans in this message. - // Semantically when InstrumentationLibrary isn't set, it is equivalent with - // an empty instrumentation library name (unknown). - InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` - // A list of Spans that originate from an instrumentation library. - Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` - // This schema_url applies to all spans and span events in the "spans" field. 
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (x *InstrumentationLibrarySpans) Reset() { - *x = InstrumentationLibrarySpans{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InstrumentationLibrarySpans) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InstrumentationLibrarySpans) ProtoMessage() {} - -func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InstrumentationLibrarySpans.ProtoReflect.Descriptor instead. -func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} -} - -func (x *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { - if x != nil { - return x.InstrumentationLibrary - } - return nil -} - -func (x *InstrumentationLibrarySpans) GetSpans() []*Span { - if x != nil { - return x.Spans - } - return nil -} - -func (x *InstrumentationLibrarySpans) GetSchemaUrl() string { - if x != nil { - return x.SchemaUrl - } - return "" -} - -// Span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace and form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. +// A Span represents a single operation performed by a single component of the system. // // The next available field id is 17. type Span struct { @@ -555,7 +437,7 @@ type Span struct { // "abc.com/score": 10.239 // // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). 
Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` @@ -582,7 +464,7 @@ type Span struct { func (x *Span) Reset() { *x = Span{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -595,7 +477,7 @@ func (x *Span) String() string { func (*Span) ProtoMessage() {} func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -608,7 +490,7 @@ func (x *Span) ProtoReflect() protoreflect.Message { // Deprecated: Use Span.ProtoReflect.Descriptor instead. func (*Span) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} } func (x *Span) GetTraceId() []byte { @@ -732,7 +614,7 @@ type Status struct { func (x *Status) Reset() { *x = Status{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -745,7 +627,7 @@ func (x *Status) String() string { func (*Status) ProtoMessage() {} func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -758,7 +640,7 @@ func (x *Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Status.ProtoReflect.Descriptor instead. func (*Status) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{5} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} } func (x *Status) GetMessage() string { @@ -799,7 +681,7 @@ type Span_Event struct { func (x *Span_Event) Reset() { *x = Span_Event{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -812,7 +694,7 @@ func (x *Span_Event) String() string { func (*Span_Event) ProtoMessage() {} func (x *Span_Event) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -825,7 +707,7 @@ func (x *Span_Event) ProtoReflect() protoreflect.Message { // Deprecated: Use Span_Event.ProtoReflect.Descriptor instead. 
func (*Span_Event) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} } func (x *Span_Event) GetTimeUnixNano() uint64 { @@ -884,7 +766,7 @@ type Span_Link struct { func (x *Span_Link) Reset() { *x = Span_Link{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[7] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -897,7 +779,7 @@ func (x *Span_Link) String() string { func (*Span_Link) ProtoMessage() {} func (x *Span_Link) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[7] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -910,7 +792,7 @@ func (x *Span_Link) ProtoReflect() protoreflect.Message { // Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. func (*Span_Link) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 1} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} } func (x *Span_Link) GetTraceId() []byte { @@ -967,7 +849,7 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc5, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -977,143 +859,123 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x82, 0x01, 0x0a, 0x1d, 0x69, - 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0xe8, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 
0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xb0, - 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x49, 0x0a, - 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, - 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, - 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, - 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, - 0x6c, 0x22, 0xea, 0x01, 0x0a, 0x1b, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, - 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x73, 0x74, 0x72, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, - 0x79, 0x12, 0x38, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x9c, - 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, - 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, - 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, - 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, - 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, - 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, - 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 
0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, - 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, - 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, + 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, + 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, + 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, + 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 
0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, + 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, + 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, + 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, + 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, - 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, - 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 
0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, - 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, - 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, - 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, - 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, - 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, - 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, - 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, - 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, - 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, - 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, - 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, - 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x58, 0x0a, - 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, - 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, + 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 
0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, + 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, + 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, + 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, + 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, + 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, + 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, + 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 
0x02, 0x12, 0x14, 0x0a, + 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, + 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, + 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, + 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, + 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, + 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1129,45 +991,40 @@ func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { } var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ - (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind - (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode - (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData - (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans - (*ScopeSpans)(nil), // 4: opentelemetry.proto.trace.v1.ScopeSpans - (*InstrumentationLibrarySpans)(nil), // 5: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span - (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status - (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event - (*Span_Link)(nil), // 9: 
opentelemetry.proto.trace.v1.Span.Link - (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource - (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope - (*v11.InstrumentationLibrary)(nil), // 12: opentelemetry.proto.common.v1.InstrumentationLibrary - (*v11.KeyValue)(nil), // 13: opentelemetry.proto.common.v1.KeyValue + (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans + (*ScopeSpans)(nil), // 4: opentelemetry.proto.trace.v1.ScopeSpans + (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue } var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ 3, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans - 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 9, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource 4, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans - 5, // 3: opentelemetry.proto.trace.v1.ResourceSpans.instrumentation_library_spans:type_name -> opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - 11, // 4: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope - 6, // 5: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span - 12, // 6: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.instrumentation_library:type_name -> opentelemetry.proto.common.v1.InstrumentationLibrary - 6, // 7: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.spans:type_name -> opentelemetry.proto.trace.v1.Span - 0, // 8: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind - 13, // 9: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 8, // 10: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event - 9, // 11: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link - 7, // 12: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status - 1, // 13: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode - 13, // 14: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 13, // 15: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field 
type_name + 10, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 5, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind + 11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 7, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event + 8, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link + 6, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status + 1, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode + 11, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 11, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name } func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() } @@ -1213,18 +1070,6 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrarySpans); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Span); i { case 0: return &v.state @@ -1236,7 +1081,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { return nil } } - file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Status); i { case 0: return &v.state @@ -1248,7 +1093,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { return nil } } - file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Span_Event); i { case 0: return &v.state @@ -1260,7 +1105,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { return nil } } - file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Span_Link); i { case 0: return &v.state @@ -1279,7 +1124,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc, NumEnums: 2, - NumMessages: 8, + NumMessages: 7, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index 38f564e2b36..5fe03f21bd3 100644 --- 
a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,23 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.10.0] - 2022-08-11 +### Added +- Add `atomic.Float32` type for atomic operations on `float32`. +- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`, + and `atomic.Value`. +- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any + type. This is present only for Go 1.18 or higher, and is a drop-in for + replacement for the standard library's `sync/atomic.Pointer` type. + +### Changed +- Deprecate `CAS` methods on all types in favor of corresponding + `CompareAndSwap` methods. + +Thanks to @eNV25 and @icpd for their contributions to this release. + +[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0 + ## [1.9.0] - 2021-07-15 ### Added - Add `Float64.Swap` to match int atomic operations. diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go index 209df7bbcd2..dfa2085f491 100644 --- a/vendor/go.uber.org/atomic/bool.go +++ b/vendor/go.uber.org/atomic/bool.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -55,8 +55,15 @@ func (x *Bool) Store(val bool) { } // CAS is an atomic compare-and-swap for bool values. +// +// Deprecated: Use CompareAndSwap. func (x *Bool) CAS(old, new bool) (swapped bool) { - return x.v.CAS(boolToInt(old), boolToInt(new)) + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for bool values. +func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { + return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) } // Swap atomically stores the given bool and returns the old diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 207594f5e80..6f4157445cf 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -56,8 +56,15 @@ func (x *Duration) Store(val time.Duration) { } // CAS is an atomic compare-and-swap for time.Duration values. +// +// Deprecated: Use CompareAndSwap. func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.v.CAS(int64(old), int64(new)) + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for time.Duration values. 
+func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { + return x.v.CompareAndSwap(int64(old), int64(new)) } // Swap atomically stores the given time.Duration and returns the old diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index 3be19c35ee7..27b23ea1628 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -49,3 +49,14 @@ func (x *Error) Load() error { func (x *Error) Store(val error) { x.v.Store(packError(val)) } + +// CompareAndSwap is an atomic compare-and-swap for error values. +func (x *Error) CompareAndSwap(old, new error) (swapped bool) { + return x.v.CompareAndSwap(packError(old), packError(new)) +} + +// Swap atomically stores the given error and returns the old +// value. +func (x *Error) Swap(val error) (old error) { + return unpackError(x.v.Swap(packError(val))) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go index ffe0be21cb0..d31fb633bb6 100644 --- a/vendor/go.uber.org/atomic/error_ext.go +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,7 +23,7 @@ package atomic // atomic.Value panics on nil inputs, or if the underlying type changes. // Stabilize by always storing a custom struct that we control. -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go type packedError struct{ Value error } diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go new file mode 100644 index 00000000000..5d535a6d2ac --- /dev/null +++ b/vendor/go.uber.org/atomic/float32.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float32 is an atomic type-safe wrapper for float32 values. +type Float32 struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroFloat32 float32 + +// NewFloat32 creates a new Float32. +func NewFloat32(val float32) *Float32 { + x := &Float32{} + if val != _zeroFloat32 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float32. +func (x *Float32) Load() float32 { + return math.Float32frombits(x.v.Load()) +} + +// Store atomically stores the passed float32. +func (x *Float32) Store(val float32) { + x.v.Store(math.Float32bits(val)) +} + +// Swap atomically stores the given float32 and returns the old +// value. +func (x *Float32) Swap(val float32) (old float32) { + return math.Float32frombits(x.v.Swap(math.Float32bits(val))) +} + +// MarshalJSON encodes the wrapped float32 into JSON. +func (x *Float32) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float32 from JSON. +func (x *Float32) UnmarshalJSON(b []byte) error { + var v float32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go new file mode 100644 index 00000000000..b0cd8d9c820 --- /dev/null +++ b/vendor/go.uber.org/atomic/float32_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go + +// Add atomically adds to the wrapped float32 and returns the new value. +func (f *Float32) Add(delta float32) float32 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float32 and returns the new value. +func (f *Float32) Sub(delta float32) float32 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float32 values. 
+// +// Deprecated: Use CompareAndSwap +func (f *Float32) CAS(old, new float32) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float32 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) { + return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float32) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go index 8a136718472..11d5189a5f2 100644 --- a/vendor/go.uber.org/atomic/float64.go +++ b/vendor/go.uber.org/atomic/float64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go index df36b0107f0..48c52b0abf6 100644 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -45,21 +45,28 @@ func (f *Float64) Sub(delta float64) float64 { // CAS is an atomic compare-and-swap for float64 values. // -// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CAS allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CAS loops from blocking forever, e.g., +// Deprecated: Use CompareAndSwap +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float64 values. // -// for { -// old := atom.Load() -// new = f(old) -// if atom.CAS(old, new) { -// break -// } -// } +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., // -// If CAS did not match NaN to match, then the above would loop forever. -func (f *Float64) CAS(old, new float64) (swapped bool) { - return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) { + return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new)) } // String encodes the wrapped value as a string. 
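Before the remaining integer wrappers, a minimal sketch of how calling code might pick up the go.uber.org/atomic v1.10.0 surface described in the vendored CHANGELOG above (CompareAndSwap replacing the deprecated CAS, the new Float32 wrapper, Swap/CompareAndSwap on String, and the generic Pointer[T] on Go 1.18+). This is illustrative only and not part of the vendored diff; the counter and config names are hypothetical.

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

type config struct{ limit int }

func main() {
	// CAS is deprecated on all wrapper types in favour of CompareAndSwap.
	n := atomic.NewInt64(1)
	swapped := n.CompareAndSwap(1, 2) // previously n.CAS(1, 2)
	fmt.Println(swapped, n.Load())    // true 2

	// New atomic.Float32 wrapper (stored as raw bits in a Uint32).
	f := atomic.NewFloat32(1.5)
	f.Add(0.5)
	fmt.Println(f.Load()) // 2

	// String gained Swap and CompareAndSwap in this release.
	s := atomic.NewString("old")
	fmt.Println(s.Swap("new")) // old

	// Generic Pointer[T] requires Go 1.18+; on Go 1.19+ it wraps sync/atomic.Pointer.
	p := atomic.NewPointer(&config{limit: 10})
	p.Store(&config{limit: 20})
	fmt.Println(p.Load().limit) // 20
}

Note the NaN caveat documented on Float32.CompareAndSwap and Float64.CompareAndSwap above: a stored NaN compares equal to a passed-in NaN so that compare-and-swap retry loops cannot block forever.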
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go index 640ea36a175..b9a68f42ca8 100644 --- a/vendor/go.uber.org/atomic/int32.go +++ b/vendor/go.uber.org/atomic/int32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Int32) Dec() int32 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Int32) CAS(old, new int32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { return atomic.CompareAndSwapInt32(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go index 9ab66b98091..78d260976fc 100644 --- a/vendor/go.uber.org/atomic/int64.go +++ b/vendor/go.uber.org/atomic/int64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Int64) Dec() int64 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Int64) CAS(old, new int64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { return atomic.CompareAndSwapInt64(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go index a8201cb4a18..54b74174abd 100644 --- a/vendor/go.uber.org/atomic/nocmp.go +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -23,13 +23,13 @@ package atomic // nocmp is an uncomparable struct. Embed this inside another struct to make // it uncomparable. // -// type Foo struct { -// nocmp -// // ... -// } +// type Foo struct { +// nocmp +// // ... +// } // // This DOES NOT: // -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go new file mode 100644 index 00000000000..e0f47dba468 --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118.go @@ -0,0 +1,60 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/go.uber.org/goleak/tracestack_old.go b/vendor/go.uber.org/atomic/pointer_go119.go similarity index 54% rename from vendor/go.uber.org/goleak/tracestack_old.go rename to vendor/go.uber.org/atomic/pointer_go119.go index 1874384ceb6..6726f17ad64 100644 --- a/vendor/go.uber.org/goleak/tracestack_old.go +++ b/vendor/go.uber.org/atomic/pointer_go119.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,21 +18,44 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -//go:build !go1.16 -// +build !go1.16 +//go:build go1.19 +// +build go1.19 -package goleak +package atomic -import ( - "strings" +import "sync/atomic" - "go.uber.org/goleak/internal/stack" -) +// Pointer is an atomic pointer of type *T. +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p atomic.Pointer[T] +} -func isTraceStack(s stack.Stack) bool { - if f := s.FirstFunction(); f != "runtime.goparkunlock" { - return false +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(v) } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return p.p.Load() +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(val) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return p.p.Swap(val) +} - return strings.Contains(s.Full(), "runtime.ReadTrace") +// CompareAndSwap is an atomic compare-and-swap. 
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(old, new) } diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index 80df93d0949..c4bea70f4dd 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -52,3 +52,14 @@ func (x *String) Load() string { func (x *String) Store(val string) { x.v.Store(val) } + +// CompareAndSwap is an atomic compare-and-swap for string values. +func (x *String) CompareAndSwap(old, new string) (swapped bool) { + return x.v.CompareAndSwap(old, new) +} + +// Swap atomically stores the given string and returns the old +// value. +func (x *String) Swap(val string) (old string) { + return x.v.Swap(val).(string) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go index 83d92edafc7..1f63dfd5b97 100644 --- a/vendor/go.uber.org/atomic/string_ext.go +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,9 +20,7 @@ package atomic -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go -// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which -// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go // String returns the wrapped value. func (s *String) String() string { diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go index 33460fc37ea..1660feb1426 100644 --- a/vendor/go.uber.org/atomic/time.go +++ b/vendor/go.uber.org/atomic/time.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index 7859a9cc3b5..d6f04a96dc3 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uint32) Dec() uint32 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { return atomic.CompareAndSwapUint32(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 2f2a7db6380..2574bdd5ec4 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uint64) Dec() uint64 { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { return atomic.CompareAndSwapUint64(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go index ecf7a77273a..81b275a7ad5 100644 --- a/vendor/go.uber.org/atomic/uintptr.go +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2021 Uber Technologies, Inc. +// Copyright (c) 2020-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -66,7 +66,14 @@ func (i *Uintptr) Dec() uintptr { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { return atomic.CompareAndSwapUintptr(&i.v, old, new) } diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go index 169f793dcf3..34868baf6a8 100644 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2021-2022 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -53,6 +53,13 @@ func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { } // CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return p.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. 
+func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { return atomic.CompareAndSwapPointer(&p.v, old, new) } diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go index 671f3a38247..52caedb9a58 100644 --- a/vendor/go.uber.org/atomic/value.go +++ b/vendor/go.uber.org/atomic/value.go @@ -25,7 +25,7 @@ import "sync/atomic" // Value shadows the type of the same name from sync/atomic // https://godoc.org/sync/atomic#Value type Value struct { - atomic.Value - _ nocmp // disallow non-atomic comparison + + atomic.Value } diff --git a/vendor/go.uber.org/goleak/CHANGELOG.md b/vendor/go.uber.org/goleak/CHANGELOG.md index a6275e27d3b..761db2caa55 100644 --- a/vendor/go.uber.org/goleak/CHANGELOG.md +++ b/vendor/go.uber.org/goleak/CHANGELOG.md @@ -4,22 +4,40 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.2.0] +### Added +- Add Cleanup option that can be used for registering cleanup callbacks. (#78) + +### Changed +- Mark VerifyNone as a test helper. (#75) + +Thanks to @tallclair for their contribution to this release. + +[1.2.0]: https://github.com/uber-go/goleak/compare/v1.1.12...v1.2.0 + ## [1.1.12] ### Fixed - Fixed logic for ignoring trace related goroutines on Go versions 1.16 and above. +[1.1.12]: https://github.com/uber-go/goleak/compare/v1.1.11...v1.1.12 + ## [1.1.11] ### Fixed - Documentation fix on how to test. - Update dependency on stretchr/testify to v1.7.0. (#59) - Update dependency on golang.org/x/tools to address CVE-2020-14040. (#62) +[1.1.11]: https://github.com/uber-go/goleak/compare/v1.1.10...v1.1.11 + ## [1.1.10] ### Added - [#49]: Add option to ignore current goroutines, which checks for any additional leaks and allows for incremental adoption of goleak in larger projects. Thanks to @denis-tingajkin for their contributions to this release. +[#49]: https://github.com/uber-go/goleak/pull/49 +[1.1.10]: https://github.com/uber-go/goleak/compare/v1.0.0...v1.1.10 + ## [1.0.0] ### Changed - Migrate to Go modules. @@ -27,8 +45,7 @@ Thanks to @denis-tingajkin for their contributions to this release. ### Fixed - Ignore trace related goroutines that cause false positives with -trace. +[1.0.0]: https://github.com/uber-go/goleak/compare/v0.10.0...v1.0.0 + ## 0.10.0 - Initial release. - -[1.0.0]: https://github.com/uber-go/goleak/compare/v0.10.0...v1.0.0 -[#49]: https://github.com/uber-go/goleak/pull/49 diff --git a/vendor/go.uber.org/goleak/leaks.go b/vendor/go.uber.org/goleak/leaks.go index 468dbaf9517..ee122b7464e 100644 --- a/vendor/go.uber.org/goleak/leaks.go +++ b/vendor/go.uber.org/goleak/leaks.go @@ -21,6 +21,7 @@ package goleak import ( + "errors" "fmt" "go.uber.org/goleak/internal/stack" @@ -55,6 +56,9 @@ func Find(options ...Option) error { cur := stack.Current().ID() opts := buildOpts(options...) + if opts.cleanup != nil { + return errors.New("Cleanup can only be passed to VerifyNone or VerifyTestMain") + } var stacks []stack.Stack retry := true for i := 0; retry; i++ { @@ -69,12 +73,30 @@ func Find(options ...Option) error { return fmt.Errorf("found unexpected goroutines:\n%s", stacks) } +type testHelper interface { + Helper() +} + // VerifyNone marks the given TestingT as failed if any extra goroutines are // found by Find. 
This is a helper method to make it easier to integrate in // tests by doing: -// defer VerifyNone(t) +// +// defer VerifyNone(t) func VerifyNone(t TestingT, options ...Option) { - if err := Find(options...); err != nil { + opts := buildOpts(options...) + var cleanup func(int) + cleanup, opts.cleanup = opts.cleanup, nil + + if h, ok := t.(testHelper); ok { + // Mark this function as a test helper, if available. + h.Helper() + } + + if err := Find(opts); err != nil { t.Error(err) } + + if cleanup != nil { + cleanup(0) + } } diff --git a/vendor/go.uber.org/goleak/options.go b/vendor/go.uber.org/goleak/options.go index 33266f61700..d2d473b71eb 100644 --- a/vendor/go.uber.org/goleak/options.go +++ b/vendor/go.uber.org/goleak/options.go @@ -41,6 +41,16 @@ type opts struct { filters []func(stack.Stack) bool maxRetries int maxSleep time.Duration + cleanup func(int) +} + +// implement apply so that opts struct itself can be used as +// an Option. +func (o *opts) apply(opts *opts) { + opts.filters = o.filters + opts.maxRetries = o.maxRetries + opts.maxSleep = o.maxSleep + opts.cleanup = o.cleanup } // optionFunc lets us easily write options without a custom type. @@ -57,6 +67,18 @@ func IgnoreTopFunction(f string) Option { }) } +// Cleanup sets up a cleanup function that will be executed at the +// end of the leak check. +// When passed to [VerifyTestMain], the exit code passed to cleanupFunc +// will be set to the exit code of TestMain. +// When passed to [VerifyNone], the exit code will be set to 0. +// This cannot be passed to [Find]. +func Cleanup(cleanupFunc func(exitCode int)) Option { + return optionFunc(func(opts *opts) { + opts.cleanup = cleanupFunc + }) +} + // IgnoreCurrent records all current goroutines when the option is created, and ignores // them in any future Find/Verify calls. func IgnoreCurrent() Option { @@ -98,8 +120,8 @@ func buildOpts(options ...Option) *opts { return opts } -func (vo *opts) filter(s stack.Stack) bool { - for _, filter := range vo.filters { +func (o *opts) filter(s stack.Stack) bool { + for _, filter := range o.filters { if filter(s) { return true } @@ -107,14 +129,14 @@ func (vo *opts) filter(s stack.Stack) bool { return false } -func (vo *opts) retry(i int) bool { - if i >= vo.maxRetries { +func (o *opts) retry(i int) bool { + if i >= o.maxRetries { return false } d := time.Duration(int(time.Microsecond) << uint(i)) - if d > vo.maxSleep { - d = vo.maxSleep + if d > o.maxSleep { + d = o.maxSleep } time.Sleep(d) return true diff --git a/vendor/go.uber.org/goleak/testmain.go b/vendor/go.uber.org/goleak/testmain.go index 316f6e1badd..7b1a50b7afb 100644 --- a/vendor/go.uber.org/goleak/testmain.go +++ b/vendor/go.uber.org/goleak/testmain.go @@ -41,9 +41,9 @@ type TestingM interface { // verify that there were no goroutine leaks. // To use it, your TestMain function should look like: // -// func TestMain(m *testing.M) { -// goleak.VerifyTestMain(m) -// } +// func TestMain(m *testing.M) { +// goleak.VerifyTestMain(m) +// } // // See https://golang.org/pkg/testing/#hdr-Main for more details. // @@ -51,13 +51,19 @@ type TestingM interface { // for any goroutine leaks and fail the tests if any leaks were found. func VerifyTestMain(m TestingM, options ...Option) { exitCode := m.Run() + opts := buildOpts(options...) 
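The Cleanup option and the reworked VerifyTestMain above change how the final exit happens: when a Cleanup callback is supplied, goleak no longer calls os.Exit itself and instead hands the callback the exit code (bumped to 1 if leaks were found on an otherwise successful run). A minimal sketch of how a test package might use it; the fixture teardown inside the callback is only illustrative:

package example_test

import (
	"fmt"
	"os"
	"testing"

	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	// Without options, VerifyTestMain exits the process itself. With Cleanup,
	// the callback receives the (possibly adjusted) exit code and decides
	// what to do with it, typically after tearing down shared state.
	goleak.VerifyTestMain(m, goleak.Cleanup(func(exitCode int) {
		fmt.Println("releasing shared test fixtures") // illustrative teardown
		os.Exit(exitCode)
	}))
}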
+ + var cleanup func(int) + cleanup, opts.cleanup = opts.cleanup, nil + if cleanup == nil { + cleanup = _osExit + } + defer func() { cleanup(exitCode) }() if exitCode == 0 { - if err := Find(options...); err != nil { + if err := Find(opts); err != nil { fmt.Fprintf(_osStderr, "goleak: Errors on successful test run: %v\n", err) exitCode = 1 } } - - _osExit(exitCode) } diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go index cf8947c3327..0aa307c0611 100644 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -94,7 +94,7 @@ func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value u func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { offset := int(ins.Off) - size := int(ins.Size) + size := ins.Size return loadCommon(in, offset, size) } @@ -121,7 +121,7 @@ func loadExtension(ins LoadExtension, in []byte) uint32 { func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { offset := int(ins.Off) + int(regX) - size := int(ins.Size) + size := ins.Size return loadCommon(in, offset, size) } diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 0a54bdbcc65..2cb9c408f2e 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded // call cancel as soon as the operations running in this Context complete. func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) + return ctx, f } // WithDeadline returns a copy of the parent context with the deadline adjusted @@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { // call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) + return ctx, f } // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 47524a61a5d..a894e6901b4 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -869,9 +869,7 @@ func (sc *serverConn) serve() { // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { - sc.sendWindowUpdate(nil, int(diff)) - } + sc.sendWindowUpdate(nil) if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) @@ -1371,6 +1369,9 @@ func (sc *serverConn) startGracefulShutdownInternal() { func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() if sc.inGoAway { + if sc.goAwayCode == ErrCodeNo { + sc.goAwayCode = code + } return } sc.inGoAway = true @@ -1585,7 +1586,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { if p := st.body; p != nil { // Return any buffered unread bytes worth of conn-level flow control. // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) + sc.sendWindowUpdate(nil) p.CloseWithError(err) } @@ -1733,7 +1734,7 @@ func (sc *serverConn) processData(f *DataFrame) error { // sendWindowUpdate, which also schedules sending the // frames. 
sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + sc.sendWindowUpdate(nil) // conn-level if st != nil && st.resetQueued { // Already have a stream error in flight. Don't send another. @@ -1747,6 +1748,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + if sc.inflow.available() < int32(f.Length) { + return sc.countError("data_flow", streamError(id, ErrCodeFlowControl)) + } + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil) // conn-level + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the // value of a content-length header field does not equal the sum of the @@ -1763,7 +1770,7 @@ func (sc *serverConn) processData(f *DataFrame) error { if len(data) > 0 { wrote, err := st.body.Write(data) if err != nil { - sc.sendWindowUpdate(nil, int(f.Length)-wrote) + sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote)) return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) } if wrote != len(data) { @@ -2090,12 +2097,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - bodyOpen := !f.StreamEnded() - if rp.method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol)) - } - rp.header = make(http.Header) for _, hf := range f.RegularFields() { rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) @@ -2108,6 +2109,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if err != nil { return nil, nil, err } + bodyOpen := !f.StreamEnded() if bodyOpen { if vv, ok := rp.header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { @@ -2223,6 +2225,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler didPanic := true defer func() { rw.rws.stream.cancelCtx() + if req.MultipartForm != nil { + req.MultipartForm.RemoveAll() + } if didPanic { e := recover() sc.writeFrameFromHandler(FrameWriteRequest{ @@ -2317,17 +2322,32 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { func (sc *serverConn) noteBodyRead(st *stream, n int) { sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level + sc.sendWindowUpdate(nil) // conn-level if st.state != stateHalfClosedRemote && st.state != stateClosed { // Don't send this WINDOW_UPDATE if the stream is closed // remotely. - sc.sendWindowUpdate(st, n) + sc.sendWindowUpdate(st) } } // st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { +func (sc *serverConn) sendWindowUpdate(st *stream) { sc.serveG.check() + + var n int32 + if st == nil { + if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 { + return + } else { + n = windowSize - avail + } + } else { + if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 { + return + } else { + n = windowSize - avail + } + } // "The legal range for the increment to the flow control // window is 1 to 2^31-1 (2,147,483,647) octets." 
// A Go Read call on 64-bit machines could in theory read @@ -2493,6 +2513,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { rws.writeHeader(200) } + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true @@ -2564,10 +2588,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { return 0, nil } - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - // only send trailers if they have actually been defined by the // server handler. hasNonemptyTrailers := rws.hasNonemptyTrailers() diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4ded4dfd56c..e9aba4390f6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -67,13 +67,23 @@ const ( // A Transport internally caches connections to servers. It is safe // for concurrent use by multiple goroutines. type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. + // DialTLSContext specifies an optional dial function with context for + // creating TLS connections for requests. // - // If DialTLS is nil, tls.Dial is used. + // If DialTLSContext and DialTLS is nil, tls.Dial is used. // // If the returned net.Conn has a ConnectionState method like tls.Conn, // it will be used to set http.Response.TLS. + DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) + + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLSContext and DialTLS is nil, tls.Dial is used. + // + // Deprecated: Use DialTLSContext instead, which allows the transport + // to cancel dials as soon as they are no longer needed. + // If both are set, DialTLSContext takes priority. DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) // TLSClientConfig specifies the TLS configuration to use with @@ -248,7 +258,8 @@ func (t *Transport) initConnPool() { // HTTP/2 server. 
type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls + tconn net.Conn // usually *tls.Conn, except specialized impls + tconnClosed bool tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request @@ -592,7 +603,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host)) + tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } @@ -613,24 +624,25 @@ func (t *Transport) newTLSConfig(host string) *tls.Config { return cfg } -func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS +func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) { + if t.DialTLSContext != nil { + return t.DialTLSContext(ctx, network, addr, tlsCfg) + } else if t.DialTLS != nil { + return t.DialTLS(network, addr, tlsCfg) } - return func(network, addr string, cfg *tls.Config) (net.Conn, error) { - tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg) - if err != nil { - return nil, err - } - state := tlsCn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return tlsCn, nil + + tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg) + if err != nil { + return nil, err + } + state := tlsCn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return tlsCn, nil } // disableKeepAlives reports whether connections should be closed as @@ -910,10 +922,10 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } -func (cc *ClientConn) closeConn() error { +func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() - return cc.tconn.Close() + cc.tconn.Close() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -979,7 +991,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { shutdownEnterWaitStateHook() select { case <-done: - return cc.closeConn() + cc.closeConn() + return nil case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -1016,7 +1029,7 @@ func (cc *ClientConn) sendGoAway() error { // closes the client connection immediately. In-flight requests are interrupted. // err is sent to streams. -func (cc *ClientConn) closeForError(err error) error { +func (cc *ClientConn) closeForError(err error) { cc.mu.Lock() cc.closed = true for _, cs := range cc.streams { @@ -1024,7 +1037,7 @@ func (cc *ClientConn) closeForError(err error) error { } cc.cond.Broadcast() cc.mu.Unlock() - return cc.closeConn() + cc.closeConn() } // Close closes the client connection immediately. @@ -1032,16 +1045,17 @@ func (cc *ClientConn) closeForError(err error) error { // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. 
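The DialTLSContext field added above supersedes DialTLS and lets the transport abandon dials once the request that needed them is gone. A minimal sketch of wiring it up with the standard library's tls.Dialer; the helper function name is an assumption, not part of the diff:

package example

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

// newHTTP2Client builds an http.Client whose HTTP/2 transport dials TLS with
// a context-aware dialer, so cancelled requests stop in-flight handshakes.
func newHTTP2Client() *http.Client {
	t := &http2.Transport{
		DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
			d := &tls.Dialer{
				NetDialer: &net.Dialer{Timeout: 10 * time.Second},
				Config:    cfg, // cfg already carries the h2 ALPN settings from the transport
			}
			return d.DialContext(ctx, network, addr)
		},
	}
	return &http.Client{Transport: t}
}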
func (cc *ClientConn) Close() error { err := errors.New("http2: client connection force closed via ClientConn.Close") - return cc.closeForError(err) + cc.closeForError(err) + return nil } // closes the client connection immediately. In-flight requests are interrupted. -func (cc *ClientConn) closeForLostPing() error { +func (cc *ClientConn) closeForLostPing() { err := errors.New("http2: client connection lost") if f := cc.t.CountError; f != nil { f("conn_close_lost_ping") } - return cc.closeForError(err) + cc.closeForError(err) } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not @@ -1994,7 +2008,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { // wake up RoundTrip if there is a pending request. cc.cond.Broadcast() - closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) @@ -2663,7 +2677,6 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { if fn := cc.t.CountError; fn != nil { fn("recv_goaway_" + f.ErrCode.stringToken()) } - } cc.setGoAway(f) return nil diff --git a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go index fc65e62fa7a..eaa896cb570 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/sys_zos_s390x.go @@ -5,6 +5,7 @@ package socket import ( + "net" "syscall" "unsafe" ) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go index 7e8f85b276d..579bcce5f28 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "os/exec" "regexp" @@ -253,7 +254,7 @@ func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err } defer file.Close() - data, err := io.ReadAll(io.LimitReader(file, 1<<20)) + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) if err != nil || len(data) == 0 { // Cachefile exists, but no data found. Get new credential. return "", nil diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271a3..95015648b43 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 4c0850a45aa..cbee7a4e230 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -61,8 +61,8 @@ func (g *Group) Wait() error { // It blocks until the new goroutine can be added without the number of // active goroutines in the group exceeding the configured limit. 
// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index bbaba18bc05..f3eb993bf24 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -6,7 +6,10 @@ package cpu import "runtime" -const cacheLineSize = 64 +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. +const cacheLineSize = 128 func initOptions() { options = []option{ diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 884430b810c..0d12c0851ad 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -4,9 +4,7 @@ package unix -import ( - "unsafe" -) +import "unsafe" // IoctlRetInt performs an ioctl operation specified by req on a device // associated with opened file descriptor fd, and returns a non-negative @@ -217,3 +215,19 @@ func IoctlKCMAttach(fd int, info KCMAttach) error { func IoctlKCMUnattach(fd int, info KCMUnattach) error { return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info)) } + +// IoctlLoopGetStatus64 gets the status of the loop device associated with the +// file descriptor fd using the LOOP_GET_STATUS64 operation. +func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { + var value LoopInfo64 + if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil { + return nil, err + } + return &value, nil +} + +// IoctlLoopSetStatus64 sets the status of the loop device associated with the +// file descriptor fd using the LOOP_SET_STATUS64 operation. 
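The errgroup doc change above spells out that the first failing goroutine cancels the group's context only when the group was created with WithContext, and that its error is what Wait returns. A minimal sketch of that form; the function and item handling are illustrative:

package example

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processAll runs one goroutine per item; the first error cancels ctx for the
// remaining goroutines, and Wait reports that first error to the caller.
func processAll(parent context.Context, items []string) error {
	g, ctx := errgroup.WithContext(parent)
	for _, item := range items {
		item := item // capture the loop variable
		g.Go(func() error {
			if err := ctx.Err(); err != nil {
				return err // another item already failed and cancelled ctx
			}
			if item == "" {
				return errors.New("empty item")
			}
			fmt.Println("processed", item)
			return nil
		})
	}
	return g.Wait()
}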
+func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { + return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 2cd0e916630..1b2b424a726 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -142,33 +142,33 @@ netbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_386) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32 -openbsd" + mksyscall="go run mksyscall.go -l32 -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) + mkasm="go run mkasm.go" mkerrors="$mkerrors" - mksyscall="go run mksyscall.go -l32 -openbsd -arm" + mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_arm64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" @@ -214,11 +214,6 @@ esac if [ "$GOOSARCH" == "aix_ppc64" ]; then # aix/ppc64 script generates files instead of writing to stdin. echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; - elif [ "$GOOS" == "darwin" ]; then - # 1.12 and later, syscalls via libSystem - echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; - # 1.13 and later, syscalls via libSystem (including syscallPtr) - echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; elif [ "$GOOS" == "illumos" ]; then # illumos code generation requires a --illumos switch echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go"; diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go deleted file mode 100644 index 8ba89ed8694..00000000000 --- a/vendor/golang.org/x/sys/unix/str.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
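The two loop-device helpers added to ioctl_linux.go above wrap LOOP_GET_STATUS64 and LOOP_SET_STATUS64. A minimal read-modify-write sketch against an already-open loop device; the function name and fd handling are assumptions:

package example

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// relabelLoopDevice reads the status of the loop device behind fd and writes
// it back with a new file-name label. fd must refer to an open /dev/loopN.
func relabelLoopDevice(fd int, label string) error {
	info, err := unix.IoctlLoopGetStatus64(fd)
	if err != nil {
		return fmt.Errorf("LOOP_GET_STATUS64: %w", err)
	}
	info.File_name = [64]byte{}    // clear the old label
	copy(info.File_name[:], label) // the kernel treats this as NUL-terminated
	return unix.IoctlLoopSetStatus64(fd, info)
}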
- -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -package unix - -func itoa(val int) string { // do it here rather than with fmt to avoid dependency - if val < 0 { - return "-" + uitoa(uint(-val)) - } - return uitoa(uint(val)) -} - -func uitoa(val uint) string { - var buf [32]byte // big enough for int64 - i := len(buf) - 1 - for val >= 10 { - buf[i] = byte(val%10 + '0') - i-- - val /= 10 - } - buf[i] = byte(val + '0') - return string(buf[i:]) -} diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 649fa87405d..63e8c838317 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -29,8 +29,6 @@ import ( "bytes" "strings" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go deleted file mode 100644 index b0098607c70..00000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.12 && !go1.13 -// +build darwin,go1.12,!go1.13 - -package unix - -import ( - "unsafe" -) - -const _SYS_GETDIRENTRIES64 = 344 - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // To implement this using libSystem we'd need syscall_syscallPtr for - // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall - // back to raw syscalls for this func on Go 1.12. - var p unsafe.Pointer - if len(buf) > 0 { - p = unsafe.Pointer(&buf[0]) - } else { - p = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - return n, errnoErr(e1) - } - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go deleted file mode 100644 index 1596426b1e2..00000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build darwin && go1.13 -// +build darwin,go1.13 - -package unix - -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -//sys closedir(dir uintptr) (err error) -//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) - -func fdopendir(fd int) (dir uintptr, err error) { - r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) - dir = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fdopendir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // Simulate Getdirentries using fdopendir/readdir_r/closedir. - // We store the number of entries to skip in the seek - // offset of fd. See issue #31368. - // It's not the full required semantics, but should handle the case - // of calling Getdirentries or ReadDirent repeatedly. - // It won't handle assigning the results of lseek to *basep, or handle - // the directory being edited underfoot. - skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) - if err != nil { - return 0, err - } - - // We need to duplicate the incoming file descriptor - // because the caller expects to retain control of it, but - // fdopendir expects to take control of its argument. - // Just Dup'ing the file descriptor is not enough, as the - // result shares underlying state. Use Openat to make a really - // new file descriptor referring to the same directory. - fd2, err := Openat(fd, ".", O_RDONLY, 0) - if err != nil { - return 0, err - } - d, err := fdopendir(fd2) - if err != nil { - Close(fd2) - return 0, err - } - defer closedir(d) - - var cnt int64 - for { - var entry Dirent - var entryp *Dirent - e := readdir_r(d, &entry, &entryp) - if e != 0 { - return n, errnoErr(e) - } - if entryp == nil { - break - } - if skip > 0 { - skip-- - cnt++ - continue - } - - reclen := int(entry.Reclen) - if reclen > len(buf) { - // Not enough room. Return for now. - // The counter will let us know where we should start up again. - // Note: this strategy for suspending in the middle and - // restarting is O(n^2) in the length of the directory. Oh well. - break - } - - // Copy entry into return buffer. - var s []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - hdr.Data = unsafe.Pointer(&entry) - hdr.Cap = reclen - hdr.Len = reclen - copy(buf, s) - - buf = buf[reclen:] - n += reclen - cnt++ - } - // Set the seek offset of the input fd to record - // how many files we've already returned. - _, err = Seek(fd, cnt, 0 /* SEEK_SET */) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 4f87f16ea7c..1f63382182f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -19,6 +19,96 @@ import ( "unsafe" ) +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fdopendir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. 
+ // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. + fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. + _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
type SockaddrDatalink struct { Len uint8 diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index c3c4c698e07..b11ede89a96 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) { } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 82be61a2f98..9ed8eec6c28 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) { } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index cd58f1026c0..f8ac9824790 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)} err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d6f538f9e00..8e932036ec3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return 
int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index 8ea6e96100a..cbe12227896 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)} err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index ecb0f27fb80..47146911f26 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "strconv" "syscall" "time" "unsafe" @@ -233,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error { func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on Linux // (and is what glibc does). - return Utimes("/proc/self/fd/"+itoa(fd), tv) + return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv) } const ImplementsGetwd = true @@ -1891,17 +1892,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint return int(ret), nil } -// issue 1435. -// On linux Setuid and Setgid only affects the current thread, not the process. -// This does not match what most callers expect so we must return an error -// here rather than letting the caller think that the call succeeded. - func Setuid(uid int) (err error) { - return EOPNOTSUPP + return syscall.Setuid(uid) +} + +func Setgid(gid int) (err error) { + return syscall.Setgid(gid) +} + +func Setreuid(ruid, euid int) (err error) { + return syscall.Setreuid(ruid, euid) +} + +func Setregid(rgid, egid int) (err error) { + return syscall.Setregid(rgid, egid) +} + +func Setresuid(ruid, euid, suid int) (err error) { + return syscall.Setresuid(ruid, euid, suid) } -func Setgid(uid int) (err error) { - return EOPNOTSUPP +func Setresgid(rgid, egid, sgid int) (err error) { + return syscall.Setresgid(rgid, egid, sgid) } // SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. 
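The Setuid/Setgid family above no longer returns EOPNOTSUPP on Linux; the calls now defer to the standard syscall package, which on the Go releases this module targets applies the credential change to every thread of the process. A minimal privilege-drop sketch; the function name and ordering follow common convention rather than anything this diff prescribes:

package example

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// dropPrivileges switches the whole process to an unprivileged uid/gid.
// The gid is changed first, while we still have permission to do so.
func dropPrivileges(uid, gid int) error {
	if err := unix.Setgid(gid); err != nil {
		return fmt.Errorf("setgid(%d): %w", gid, err)
	}
	if err := unix.Setuid(uid); err != nil {
		return fmt.Errorf("setuid(%d): %w", uid, err)
	}
	return nil
}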
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 518e476e6dd..ff5b5899d6d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -41,10 +41,6 @@ func setTimeval(sec, usec int64) Timeval { //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index f5e9d6bef10..9b270353298 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -46,11 +46,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index c1a7778f105..856ad1d635c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -62,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index d83e2c65716..6422704bc52 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -39,11 +39,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset 
*int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 0b69c3eff96..59dab510e97 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -34,10 +34,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 98a2660b91f..bfef09a39eb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -37,11 +37,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index b8a18c0ad22..ab302509663 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -32,10 +32,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) diff --git 
a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 4ed9e67c6df..eac1cf1acc8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -34,10 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index db63d384c5b..4df56616b8f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -34,11 +34,7 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 925a748a39b..5f4243dea2c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -38,11 +38,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 6fcf277b0d7..d0a7d406685 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -34,11 +34,7 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) 
-//sysnb Setreuid(ruid int, euid int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 02a45d9cc06..f5c793be26d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -31,11 +31,7 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go new file mode 100644 index 00000000000..5930a8972b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm) || (openbsd && arm64) +// +build openbsd,386 openbsd,amd64 openbsd,arm openbsd,arm64 + +package unix + +import _ "unsafe" + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall syscall.syscall +//go:linkname syscall_syscall6 syscall.syscall6 +//go:linkname syscall_syscall10 syscall.syscall10 +//go:linkname syscall_rawSyscall syscall.rawSyscall +//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 + +func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { + return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b5ec457cdcc..8c6f4092abe 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -750,8 +750,8 @@ type EventPort struct { // we should handle things gracefully. To do so, we need to keep an extra // reference to the cookie around until the event is processed // thus the otherwise seemingly extraneous "cookies" map - // The key of this map is a pointer to the corresponding &fCookie.cookie - cookies map[*interface{}]*fileObjCookie + // The key of this map is a pointer to the corresponding fCookie + cookies map[*fileObjCookie]struct{} } // PortEvent is an abstraction of the port_event C struct. 
@@ -778,7 +778,7 @@ func NewEventPort() (*EventPort, error) { port: port, fds: make(map[uintptr]*fileObjCookie), paths: make(map[string]*fileObjCookie), - cookies: make(map[*interface{}]*fileObjCookie), + cookies: make(map[*fileObjCookie]struct{}), } return e, nil } @@ -799,6 +799,7 @@ func (e *EventPort) Close() error { } e.fds = nil e.paths = nil + e.cookies = nil return nil } @@ -826,17 +827,16 @@ func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, coo if _, found := e.paths[path]; found { return fmt.Errorf("%v is already associated with this Event Port", path) } - fobj, err := createFileObj(path, stat) + fCookie, err := createFileObjCookie(path, stat, cookie) if err != nil { return err } - fCookie := &fileObjCookie{fobj, cookie} - _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fCookie.fobj)), events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } e.paths[path] = fCookie - e.cookies[&fCookie.cookie] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -858,7 +858,7 @@ func (e *EventPort) DissociatePath(path string) error { if err == nil { // dissociate was successful, safe to delete the cookie fCookie := e.paths[path] - delete(e.cookies, &fCookie.cookie) + delete(e.cookies, fCookie) } delete(e.paths, path) return err @@ -871,13 +871,16 @@ func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) erro if _, found := e.fds[fd]; found { return fmt.Errorf("%v is already associated with this Event Port", fd) } - fCookie := &fileObjCookie{nil, cookie} - _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + fCookie, err := createFileObjCookie("", nil, cookie) + if err != nil { + return err + } + _, err = port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } e.fds[fd] = fCookie - e.cookies[&fCookie.cookie] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -896,27 +899,31 @@ func (e *EventPort) DissociateFd(fd uintptr) error { if err == nil { // dissociate was successful, safe to delete the cookie fCookie := e.fds[fd] - delete(e.cookies, &fCookie.cookie) + delete(e.cookies, fCookie) } delete(e.fds, fd) return err } -func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { - fobj := new(fileObj) - bs, err := ByteSliceFromString(name) - if err != nil { - return nil, err - } - fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) - s := stat.Sys().(*syscall.Stat_t) - fobj.Atim.Sec = s.Atim.Sec - fobj.Atim.Nsec = s.Atim.Nsec - fobj.Mtim.Sec = s.Mtim.Sec - fobj.Mtim.Nsec = s.Mtim.Nsec - fobj.Ctim.Sec = s.Ctim.Sec - fobj.Ctim.Nsec = s.Ctim.Nsec - return fobj, nil +func createFileObjCookie(name string, stat os.FileInfo, cookie interface{}) (*fileObjCookie, error) { + fCookie := new(fileObjCookie) + fCookie.cookie = cookie + if name != "" && stat != nil { + fCookie.fobj = new(fileObj) + bs, err := ByteSliceFromString(name) + if err != nil { + return nil, err + } + fCookie.fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fCookie.fobj.Atim.Sec = s.Atim.Sec + fCookie.fobj.Atim.Nsec = s.Atim.Nsec + fCookie.fobj.Mtim.Sec = s.Mtim.Sec + fCookie.fobj.Mtim.Nsec = s.Mtim.Nsec + fCookie.fobj.Ctim.Sec = s.Ctim.Sec + fCookie.fobj.Ctim.Nsec = s.Ctim.Nsec + } + return fCookie, nil } // GetOne wraps port_get(3c) and returns a single PortEvent. 
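The event-port rework above is internal (the cookies map now keys on the fileObjCookie itself, and GetOne/Get can report an already-closed port), so callers keep using the same exported API. A minimal Solaris-only sketch of that API for context; the watched-path handling and the FILE_MODIFIED event mask are illustrative assumptions:

//go:build solaris

package example

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// watchOnce associates a path with an event port, blocks for one event,
// and returns the cookie that was registered with the association.
func watchOnce(path string) (interface{}, error) {
	port, err := unix.NewEventPort()
	if err != nil {
		return nil, err
	}
	defer port.Close()

	stat, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if err := port.AssociatePath(path, stat, unix.FILE_MODIFIED, "my-cookie"); err != nil {
		return nil, err
	}
	ev, err := port.GetOne(nil) // nil timeout blocks until an event arrives
	if err != nil {
		return nil, err
	}
	fmt.Printf("event on %s: 0x%x\n", ev.Path, ev.Events)
	return ev.Cookie, nil
}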
@@ -929,44 +936,50 @@ func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { p := new(PortEvent) e.mu.Lock() defer e.mu.Unlock() - e.peIntToExt(pe, p) + err = e.peIntToExt(pe, p) + if err != nil { + return nil, err + } return p, nil } // peIntToExt converts a cgo portEvent struct into the friendlier PortEvent // NOTE: Always call this function while holding the e.mu mutex -func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) { +func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) error { + if e.cookies == nil { + return fmt.Errorf("this EventPort is already closed") + } peExt.Events = peInt.Events peExt.Source = peInt.Source - cookie := (*interface{})(unsafe.Pointer(peInt.User)) - peExt.Cookie = *cookie + fCookie := (*fileObjCookie)(unsafe.Pointer(peInt.User)) + _, found := e.cookies[fCookie] + + if !found { + panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254") + } + peExt.Cookie = fCookie.cookie + delete(e.cookies, fCookie) + switch peInt.Source { case PORT_SOURCE_FD: - delete(e.cookies, cookie) peExt.Fd = uintptr(peInt.Object) // Only remove the fds entry if it exists and this cookie matches if fobj, ok := e.fds[peExt.Fd]; ok { - if &fobj.cookie == cookie { + if fobj == fCookie { delete(e.fds, peExt.Fd) } } case PORT_SOURCE_FILE: - if fCookie, ok := e.cookies[cookie]; ok && uintptr(unsafe.Pointer(fCookie.fobj)) == uintptr(peInt.Object) { - // Use our stashed reference rather than using unsafe on what we got back - // the unsafe version would be (*fileObj)(unsafe.Pointer(uintptr(peInt.Object))) - peExt.fobj = fCookie.fobj - } else { - panic("mismanaged memory") - } - delete(e.cookies, cookie) + peExt.fobj = fCookie.fobj peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name))) // Only remove the paths entry if it exists and this cookie matches if fobj, ok := e.paths[peExt.Path]; ok { - if &fobj.cookie == cookie { + if fobj == fCookie { delete(e.paths, peExt.Path) } } } + return nil } // Pending wraps port_getn(3c) and returns how many events are pending. @@ -990,7 +1003,7 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) got := uint32(min) max := uint32(len(s)) var err error - ps := make([]portEvent, max, max) + ps := make([]portEvent, max) _, err = port_getn(e.port, &ps[0], max, &got, timeout) // got will be trustworthy with ETIME, but not any other error. if err != nil && err != ETIME { @@ -998,8 +1011,18 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) } e.mu.Lock() defer e.mu.Unlock() + valid := 0 for i := 0; i < int(got); i++ { - e.peIntToExt(&ps[i], &s[i]) + err2 := e.peIntToExt(&ps[i], &s[i]) + if err2 != nil { + if valid == 0 && err == nil { + // If err2 is the only error and there are no valid events + // to return, return it to the caller. + err = err2 + } + break + } + valid = i + 1 } - return int(got), err + return valid, err } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 1ff5060b512..9f7535607e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -13,8 +13,6 @@ import ( "sync" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -117,11 +115,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d } // Use unsafe to convert addr into a []byte. 
- var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = length - hdr.Len = length + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), length) // Register mapping in m and return it. p := &b[cap(b)-1] diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 0bb4c8de557..5bb41d17bc4 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -7,11 +7,7 @@ package unix -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) +import "unsafe" // SysvShmAttach attaches the Sysv shared memory segment associated with the // shared memory identifier id. @@ -34,12 +30,7 @@ func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) { } // Use unsafe to convert addr into a []byte. - // TODO: convert to unsafe.Slice once we can assume Go 1.17 - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = int(info.Segsz) - hdr.Len = int(info.Segsz) + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.Segsz)) return b, nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 25df1e37801..663b3779de2 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -160,13 +160,12 @@ func Lremovexattr(link string, attr string) (err error) { } func Listxattr(file string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) destsiz := len(dest) // FreeBSD won't allow you to list xattrs from multiple namespaces - s := 0 + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + stmp, e := ListxattrNS(file, nsid, dest[pos:]) /* Errors accessing system attrs are ignored so that * we can implement the Linux-like behavior of omitting errors that @@ -175,66 +174,102 @@ func Listxattr(file string, dest []byte) (sz int, err error) { * Linux will still error if we ask for user attributes on a file that * we don't have read permissions on, so don't ignore those errors */ - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Flistxattr(fd int, dest []byte) (sz int, err error) { +func ListxattrNS(file string, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := FlistxattrNS(fd, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Llistxattr(link string, 
dest []byte) (sz int, err error) { +func FlistxattrNS(fd int, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := LlistxattrNS(link, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) + } + + return s, nil +} + +func LlistxattrNS(link string, nsid int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err } return s, nil diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go deleted file mode 100644 index a06eb093242..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -//go:build darwin && amd64 && go1.13 -// +build darwin,amd64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s deleted file mode 100644 index f5bb40eda9e..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm.go darwin amd64 -// Code generated by the command above; DO NOT EDIT. 
- -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 467deed7633..c2461c49679 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. -//go:build darwin && amd64 && go1.12 -// +build darwin,amd64,go1.12 +//go:build darwin && amd64 +// +build darwin,amd64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index b41467a0e50..95fe4c0eb96 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,11 +1,14 @@ // go run mkasm.go darwin amd64 // Code generated by the command above; DO NOT EDIT. 
-//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go deleted file mode 100644 index cec595d553a..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -//go:build darwin && arm64 && go1.13 -// +build darwin,arm64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s deleted file mode 100644 index 0c3f76bc203..00000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm.go darwin arm64 -// Code generated by the command above; DO NOT EDIT. 
- -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 35938d34ff8..26a0fdc505b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. -//go:build darwin && arm64 && go1.12 -// +build darwin,arm64,go1.12 +//go:build darwin && arm64 +// +build darwin,arm64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index e1f9204a208..efa5b4c987c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,11 +1,14 @@ // go run mkasm.go darwin arm64 // Code generated by the command above; DO NOT EDIT. 
-//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 88af526b7e2..c81b0ad4777 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -287,46 +287,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2a0c4aa6a63..2206bce7f4d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,36 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -374,16 +344,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 4882bde3af0..edf6b39f161 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -412,46 +412,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 9f8c24e4343..190609f2140 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,36 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) 
(err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -329,16 +299,6 @@ func setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 523f2ba03e4..806ffd1e125 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -223,46 +223,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index d7d6f42441b..5f984cbb1ca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 7f1f8e65339..46fc380a40e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index f933d0f51a1..cbd0d4dadba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err 
error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 297d0a99822..0c13d15f07c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index 2e32e7a449f..e01432aed51 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -308,46 +308,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 3c531704647..13c7ee7baff 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index a00c6744ecb..02d0c0fd61e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, 
euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 1239cc2de9c..9fee3b1d239 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,36 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -309,16 +279,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index e0dabc60278..647bbfecd6a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,36 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ 
-359,16 +329,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 368623c0f2e..ada057f8914 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,36 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { @@ -369,16 +339,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a057fc5d351..2925fe0a7b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -libc -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func 
Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, 
uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func 
Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + 
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1490,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1503,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1511,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1524,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } 
+var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid 
"libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 
{ err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink 
"libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s new file mode 100644 index 00000000000..75eb2f5f3f7 --- /dev/null +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd 386 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT 
libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT 
libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL 
·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL 
·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 04db8fa2fea..98446d2b954 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func 
Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, 
uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := 
syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func Kqueue() (fd int, err error) { return } +var 
libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1490,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1503,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1511,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1524,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path 
string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { 
if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func 
Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ 
-1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s new file mode 100644 index 00000000000..243a6663ce6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd amd64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + 
+TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT 
libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL 
·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 69f80300674..8da6791d1e3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// go run mksyscall.go -l32 -openbsd -arm -libc -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func 
Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, 
uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_ftruncate_trampoline_addr, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func 
Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + 
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1490,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1503,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1511,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1524,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } 
+var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid 
"libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 
{ err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink 
"libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s new file mode 100644 index 00000000000..9ad116d9fbd --- /dev/null +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd arm +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT 
libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT 
libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL 
·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL 
·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index c96a505178f..800aab6e3e7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +607,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +626,17 @@ func Chflags(path 
string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +683,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +864,35 @@ func 
Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, 
uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := 
syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1202,10 @@ func Kqueue() (fd int, err error) { return } +var 
libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1490,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1503,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1511,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1524,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1532,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1665,17 @@ func Revoke(path 
string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) { 
if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2016,31 @@ func 
Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ 
-1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s new file mode 100644 index 00000000000..4efeff9abbf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd arm64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) + +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) + +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) + +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) + +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) + +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) + +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) + +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) + +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) + +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) + +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) + +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) + +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) + +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) + +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) + +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) + +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) + +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + 
+TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) + +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) + +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) + +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) + +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) + +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) + +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) + +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) + +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) + +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) + +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) + +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) + +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) + +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) + +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) + +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) + +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) + +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) + +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) + +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) + +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) + +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) + +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) + +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) + +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) + +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) + +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) + +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) + +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) + +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) + +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) + +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) + +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) + +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) + +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) + +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) + +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) + +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) + +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) + +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) + +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) + +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) + +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) + +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) + +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) + +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) + +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) + +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) + +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) + +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) + +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) + +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) + +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT 
libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) + +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) + +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) + +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) + +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) + +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) + +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) + +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) + +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) + +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) + +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) + +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) + +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) + +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) + +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) + +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) + +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) + +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) + +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) + +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) + +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) + +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) + +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) + +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) + +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) + +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) + +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) + +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) + +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) + +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) + +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) + +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) + +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) + +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) + +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) + +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) + +GLOBL 
·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) + +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) + +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) + +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) + +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) + +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) + +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) + +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) + +GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) + +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) + +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) + +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) + +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) + +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) + +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) + +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) + +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) + +GLOBL ·libc_sync_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) + +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) + +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) + +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) + +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) + +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) + +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) + +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) + +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) + +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 817edbf95c0..597733813e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index ea453614e69..16af2918994 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 467971eed66..f59b18a9779 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. 
const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 32eec5ed56f..721ef591032 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index dea0c9a607d..d9c78cdcbc4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -294,7 +294,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -312,6 +312,17 @@ type __Siginfo struct { Value [4]byte _ [32]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [4]byte + _ [32]byte +} type Sigset_t struct { Val [4]uint32 @@ -350,8 +361,8 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index da0ea0d608a..26991b16559 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -310,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -354,8 +366,8 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index da8f7404509..f8324e7e7f4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -293,7 +293,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -312,6 +312,18 @@ type __Siginfo struct { _ [32]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [4]byte + _ [32]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -337,8 +349,8 @@ type FpExtendedPrecision struct { type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index d69988e5e58..4220411f341 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -291,7 +291,7 @@ type PtraceLwpInfoStruct 
struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -310,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -334,8 +346,8 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index d6fd9e88382..0660fd45c7c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -310,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -335,8 +347,8 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 - Offs *byte - Addr *byte + Offs uintptr + Addr uintptr Len uint64 } diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go index 14027da3f3f..f8126482fa5 100644 --- a/vendor/golang.org/x/sys/windows/setupapi_windows.go +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -296,7 +296,7 @@ const ( // Flag to indicate that the sorting from the INF file should be used. DI_INF_IS_SORTED DI_FLAGS = 0x00008000 - // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 // Flag that prevents ConfigMgr from removing/re-enumerating devices during device diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 72074d582f1..8732cdb957f 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -30,8 +30,6 @@ import ( "strings" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -83,13 +81,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. 
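Editor's note on the x/sys/windows/syscall.go hunk above: BytePtrToString now relies on unsafe.Slice (available since Go 1.17) instead of hand-building a slice header through the internal unsafeheader package. A minimal stand-alone sketch of that pattern, assuming the NUL terminator has already been located at offset n, might look like this (not part of the diff):

package sketch

import "unsafe"

// bytePtrToString returns the string formed by the n bytes starting at p.
// unsafe.Slice creates a []byte view over those bytes without copying;
// the string conversion then performs the single required copy.
func bytePtrToString(p *byte, n int) string {
	return string(unsafe.Slice(p, n))
}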
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index e27913817a7..3f2cbb638fd 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -138,13 +138,7 @@ func UTF16PtrToString(p *uint16) string { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - var s []uint16 - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(utf16.Decode(s)) + return string(utf16.Decode(unsafe.Slice(p, n))) } func Getpagesize() int { return 4096 } @@ -364,6 +358,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) +//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows +//sys EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows +//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW +//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow +//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow +//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow +//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode +//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible +//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -439,6 +442,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable //sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable +// Desktop Window Manager API (Dwmapi) +//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute +//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
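Editor's note on the syscall_windows.go hunk above: this update declares new user32 and dwmapi wrappers (EnumWindows, GetForegroundWindow, DwmSetWindowAttribute, and friends). A rough usage sketch follows; it is not part of the diff, and the int32(1) dark-mode value is an assumption about how DWMWA_USE_IMMERSIVE_DARK_MODE (added in types_windows.go below) is typically driven, not something this patch specifies:

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	hwnd := windows.GetForegroundWindow()
	if hwnd == 0 {
		return
	}
	// Request a dark title bar for the foreground window via the new dwmapi binding.
	enable := int32(1)
	err := windows.DwmSetWindowAttribute(
		hwnd,
		windows.DWMWA_USE_IMMERSIVE_DARK_MODE,
		unsafe.Pointer(&enable),
		uint32(unsafe.Sizeof(enable)),
	)
	fmt.Println("DwmSetWindowAttribute:", err)
}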
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index f9eaca528ed..0c4add97410 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3213,3 +3213,48 @@ type ModuleInfo struct { } const ALL_PROCESSOR_GROUPS = 0xFFFF + +type Rect struct { + Left int32 + Top int32 + Right int32 + Bottom int32 +} + +type GUIThreadInfo struct { + Size uint32 + Flags uint32 + Active HWND + Focus HWND + Capture HWND + MenuOwner HWND + MoveSize HWND + CaretHandle HWND + CaretRect Rect +} + +const ( + DWMWA_NCRENDERING_ENABLED = 1 + DWMWA_NCRENDERING_POLICY = 2 + DWMWA_TRANSITIONS_FORCEDISABLED = 3 + DWMWA_ALLOW_NCPAINT = 4 + DWMWA_CAPTION_BUTTON_BOUNDS = 5 + DWMWA_NONCLIENT_RTL_LAYOUT = 6 + DWMWA_FORCE_ICONIC_REPRESENTATION = 7 + DWMWA_FLIP3D_POLICY = 8 + DWMWA_EXTENDED_FRAME_BOUNDS = 9 + DWMWA_HAS_ICONIC_BITMAP = 10 + DWMWA_DISALLOW_PEEK = 11 + DWMWA_EXCLUDED_FROM_PEEK = 12 + DWMWA_CLOAK = 13 + DWMWA_CLOAKED = 14 + DWMWA_FREEZE_REPRESENTATION = 15 + DWMWA_PASSIVE_UPDATE_MODE = 16 + DWMWA_USE_HOSTBACKDROPBRUSH = 17 + DWMWA_USE_IMMERSIVE_DARK_MODE = 20 + DWMWA_WINDOW_CORNER_PREFERENCE = 33 + DWMWA_BORDER_COLOR = 34 + DWMWA_CAPTION_COLOR = 35 + DWMWA_TEXT_COLOR = 36 + DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 52d4742cb94..96ba8559c37 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -40,6 +40,7 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") + moddwmapi = NewLazySystemDLL("dwmapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modmswsock = NewLazySystemDLL("mswsock.dll") @@ -175,6 +176,8 @@ var ( procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") @@ -444,9 +447,18 @@ var ( procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procEnumChildWindows = moduser32.NewProc("EnumChildWindows") + procEnumWindows = moduser32.NewProc("EnumWindows") procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procGetClassNameW = moduser32.NewProc("GetClassNameW") + procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") + procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") + procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procIsWindow = moduser32.NewProc("IsWindow") + procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") procMessageBoxW = moduser32.NewProc("MessageBoxW") procCreateEnvironmentBlock = 
moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") @@ -1525,6 +1537,22 @@ func DnsRecordListFree(rl *DNSRecord, freetype uint32) { return } +func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -3802,6 +3830,19 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui return } +func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { + syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + return +} + +func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitWindowsEx(flags uint32, reason uint32) (err error) { r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) if r1 == 0 { @@ -3810,6 +3851,35 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { return } +func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { + r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + copied = int32(r0) + if copied == 0 { + err = errnoErr(e1) + } + return +} + +func GetDesktopWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetForegroundWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -3825,6 +3895,24 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { return } +func IsWindow(hwnd HWND) (isWindow bool) { + r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + isWindow = r0 != 0 + return +} + +func IsWindowUnicode(hwnd HWND) (isUnicode bool) { + r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + isUnicode = r0 != 0 + return +} + +func IsWindowVisible(hwnd HWND) (isVisible bool) { + r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + isVisible = r0 != 0 + return +} + func 
MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 12e813cc711..79a6bb65456 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -125,7 +125,7 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 2d3e00edc9f..75248fd16ec 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -382,11 +382,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index 61720ec2ea1..f5d826c2a19 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -7,7 +7,7 @@ // // This package is DEPRECATED. Users should instead use, // -// service, err := NewService(..., option.WithAPIKey(...)) +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 41642001183..e450fba31cb 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -8,31 +8,31 @@ // // For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/iamcredentials/v1" -// ... -// ctx := context.Background() -// iamcredentialsService, err := iamcredentials.NewService(ctx) +// import "google.golang.org/api/iamcredentials/v1" +// ... +// ctx := context.Background() +// iamcredentialsService, err := iamcredentials.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. 
// -// Other authentication options +// # Other authentication options // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package iamcredentials // import "google.golang.org/api/iamcredentials/v1" @@ -519,11 +519,11 @@ type ProjectsServiceAccountsGenerateAccessTokenCall struct { // GenerateAccessToken: Generates an OAuth 2.0 access token for a // service account. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall { c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -666,11 +666,11 @@ type ProjectsServiceAccountsGenerateIdTokenCall struct { // GenerateIdToken: Generates an OpenID Connect ID token for a service // account. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall { c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -813,11 +813,11 @@ type ProjectsServiceAccountsSignBlobCall struct { // SignBlob: Signs a blob using a service account's system-managed // private key. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. 
+// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall { c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -960,11 +960,11 @@ type ProjectsServiceAccountsSignJwtCall struct { // SignJwt: Signs a JWT using a service account's system-managed private // key. // -// - name: The resource name of the service account for which the -// credentials are requested, in the following format: -// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` -// wildcard character is required; replacing it with a project ID is -// invalid. +// - name: The resource name of the service account for which the +// credentials are requested, in the following format: +// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` +// wildcard character is required; replacing it with a project ID is +// invalid. func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall { c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b067a179b7a..32d52413b30 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -70,11 +70,12 @@ const ( // // - A self-signed JWT flow will be executed if the following conditions are // met: -// (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users -// (2) No service account impersontation +// +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189f4..1b770464110 100644 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. // A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. 
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index d14a22470c1..66caf24cb13 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -289,13 +289,12 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup := []io.Closer{ - combined, - } + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -309,18 +308,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB return r, nil } } - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. + // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go index 6703721ffde..1a30d2ca252 100644 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -37,7 +37,7 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. func (u URLParams) Encode() string { return url.Values(u).Encode() diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 70a8e01c1b3..dd24139b366 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -17,6 +17,27 @@ import ( "github.com/googleapis/gax-go/v2" ) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. 
+func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) +} + // SendRequest sends a single HTTP request using the given client. // If ctx is non-nil, it calls all hooks, then sends the request with // req.WithContext, then calls any functions returned by the hooks in @@ -96,12 +117,12 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r for { select { case <-ctx.Done(): - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err == nil { - err = ctx.Err() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() case <-time.After(pause): } @@ -110,10 +131,10 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r // select is satisfied at the same time, Go will choose one arbitrarily. // That can cause an operation to go through even if the context was // canceled before. - if err == nil { - err = ctx.Err() + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() } invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 046c62ec18f..ce0e80c4e80 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.91.0" +const Version = "0.97.0" diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 60743c63e2d..f56a8c1d906 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -82,6 +82,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -287,10 +290,10 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // service account SA2 while using delegate service accounts DSA1 and DSA2, // the following must be true: // -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. // // The resulting impersonated credential will either have the default scopes of // the client being instantiating or the scopes from WithScopes if provided. @@ -305,9 +308,9 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // // This is an EXPERIMENTAL API and may be changed or removed in the future. 
// -// This option has been replaced by `impersonate` package: +// Deprecated: This option has been replaced by `impersonate` package: // `google.golang.org/api/impersonate`. Please use the `impersonate` package -// instead. +// instead with the WithTokenSource option. func ImpersonateCredentials(target string, delegates ...string) ClientOption { return impersonateServiceAccount{ target: target, diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 353367a7c1b..35cea853eb1 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -10,35 +10,35 @@ // // For product documentation, see: https://developers.google.com/storage/docs/json_api/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/storage/v1" -// ... -// ctx := context.Background() -// storageService, err := storage.NewService(ctx) +// import "google.golang.org/api/storage/v1" +// ... +// ctx := context.Background() +// storageService, err := storage.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) +// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) +// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package storage // import "google.golang.org/api/storage/v1" @@ -822,7 +822,7 @@ func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type BucketLifecycleRuleCondition struct { // Age: Age of an object (in days). This condition is satisfied when an // object reaches the specified age. - Age int64 `json:"age,omitempty"` + Age *int64 `json:"age,omitempty"` // CreatedBefore: A date in RFC 3339 format with only the date part (for // instance, "2013-01-15"). This condition is satisfied when an object @@ -2484,10 +2484,10 @@ type BucketAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. 
Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2614,10 +2614,10 @@ type BucketAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3094,10 +3094,10 @@ type BucketAccessControlsPatchCall struct { // Patch: Patches an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3260,10 +3260,10 @@ type BucketAccessControlsUpdateCall struct { // Update: Updates an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -3598,8 +3598,9 @@ func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { c.urlParams_.Set("projection", projection) return c @@ -3968,14 +3969,22 @@ func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsert // predefined set of access controls to this bucket. 
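Separately, BucketLifecycleRuleCondition.Age becoming *int64 earlier in this file's diff lets a caller distinguish an explicit zero from an unset condition. A hedged sketch; int64Ptr is a local helper because the generated package does not ship one, and the rule itself is purely illustrative:

	package main

	import (
		"encoding/json"
		"fmt"

		storage "google.golang.org/api/storage/v1"
	)

	// int64Ptr is a local helper for taking the address of a literal.
	func int64Ptr(v int64) *int64 { return &v }

	func main() {
		// An explicit zero age is now representable and survives marshaling,
		// whereas a nil Age means the age condition is simply not set.
		rule := &storage.BucketLifecycleRule{
			Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
			Condition: &storage.BucketLifecycleRuleCondition{Age: int64Ptr(0)},
		}
		b, err := json.Marshal(rule)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b)) // output includes "age":0
	}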
// // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -3987,16 +3996,26 @@ func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCa // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -4009,8 +4028,9 @@ func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { c.urlParams_.Set("projection", projection) return c @@ -4245,8 +4265,9 @@ func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsListCall) Projection(projection string) *BucketsListCall { c.urlParams_.Set("projection", projection) return c @@ -4453,9 +4474,9 @@ type BucketsLockRetentionPolicyCall struct { // LockRetentionPolicy: Locks retention policy on a bucket. // -// - bucket: Name of a bucket. 
-// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether +// bucket's current metageneration matches the given value. func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -4641,14 +4662,22 @@ func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // predefined set of access controls to this bucket. // // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -4660,16 +4689,26 @@ func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -4680,8 +4719,9 @@ func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
+//
+// "full" - Include all properties.
+// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { c.urlParams_.Set("projection", projection) return c @@ -5253,14 +5293,22 @@ func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // predefined set of access controls to this bucket. // // Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and +// +// "authenticatedRead" - Project team owners get OWNER access, and +// // allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// // their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// // get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and +// +// "publicReadWrite" - Project team owners get OWNER access, and +// // allUsers get WRITER access. func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -5272,16 +5320,26 @@ func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCa // object access controls to this bucket. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) @@ -5292,8 +5350,9 @@ func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { c.urlParams_.Set("projection", projection) return c @@ -5609,10 +5668,10 @@ type DefaultObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. 
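For orientation, the reformatted option docs above correspond to fluent setters on the generated calls. A hedged usage sketch against the Storage JSON API; the bucket name, storage class, and enum values are placeholders taken from the "Possible values" lists:

	package main

	import (
		"context"
		"log"

		storage "google.golang.org/api/storage/v1"
	)

	func main() {
		ctx := context.Background()
		svc, err := storage.NewService(ctx)
		if err != nil {
			log.Fatal(err)
		}

		// Patch a bucket, applying a predefined ACL and limiting the returned
		// projection; both string values come from the enum docs above.
		bucket, err := svc.Buckets.Patch("example-bucket", &storage.Bucket{StorageClass: "NEARLINE"}).
			PredefinedAcl("projectPrivate").
			Projection("noAcl").
			Context(ctx).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("patched bucket %s", bucket.Name)
	}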
+// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5739,10 +5798,10 @@ type DefaultObjectAccessControlsGetCall struct { // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6249,10 +6308,10 @@ type DefaultObjectAccessControlsPatchCall struct { // Patch: Patches a default object ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -6415,10 +6474,10 @@ type DefaultObjectAccessControlsUpdateCall struct { // Update: Updates a default object ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7195,12 +7254,12 @@ type ObjectAccessControlsDeleteCall struct { // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7351,12 +7410,12 @@ type ObjectAccessControlsGetCall struct { // Get: Returns the ACL entry for the specified entity on the specified // object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7546,9 +7605,9 @@ type ObjectAccessControlsInsertCall struct { // Insert: Creates a new ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7725,9 +7784,9 @@ type ObjectAccessControlsListCall struct { // List: Retrieves ACL entries on the specified object. // -// - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7909,12 +7968,12 @@ type ObjectAccessControlsPatchCall struct { // Patch: Patches an ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. 
For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8101,12 +8160,12 @@ type ObjectAccessControlsUpdateCall struct { // Update: Updates an ACL entry on the specified object. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -8293,11 +8352,11 @@ type ObjectsComposeCall struct { // Compose: Concatenates a list of existing objects into a new object in // the same bucket. // -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts. +// - destinationBucket: Name of the bucket containing the source +// objects. The destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about +// how to URL encode object names to be path safe, see Encoding URI +// Path Parts. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket @@ -8311,16 +8370,26 @@ func (r *ObjectsService) Compose(destinationBucket string, destinationObject str // to the destination object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. 
-// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -8347,7 +8416,9 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) // KmsKeyName sets the optional parameter "kmsKeyName": Resource name of // the Cloud KMS key, of the form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) @@ -8550,18 +8621,18 @@ type ObjectsCopyCall struct { // Copy: Copies a source object to a destination object. Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any.For information about how to URL encode object names to be path +// safe, see Encoding URI Path Parts. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -8576,7 +8647,9 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat // "destinationKmsKeyName": Resource name of the Cloud KMS key, of the // form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) @@ -8588,16 +8661,26 @@ func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *O // to the destination object. 
// // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -8682,8 +8765,9 @@ func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationN // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { c.urlParams_.Set("projection", projection) return c @@ -8962,9 +9046,9 @@ type ObjectsDeleteCall struct { // if versioning is not enabled for the bucket, or if the generation // parameter is used. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9165,9 +9249,9 @@ type ObjectsGetCall struct { // Get: Retrieves an object or its metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9223,8 +9307,9 @@ func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
+//
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { c.urlParams_.Set("projection", projection) return c @@ -9453,9 +9538,9 @@ type ObjectsGetIamPolicyCall struct { // GetIamPolicy: Returns an IAM policy for the specified object. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9640,8 +9725,8 @@ type ObjectsInsertCall struct { // Insert: Stores a new object and metadata. // -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. +// - bucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9699,7 +9784,9 @@ func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // KmsKeyName sets the optional parameter "kmsKeyName": Resource name of // the Cloud KMS key, of the form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) @@ -9719,16 +9806,26 @@ func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -9740,8 +9837,9 @@ func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCa // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. 
+// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { c.urlParams_.Set("projection", projection) return c @@ -10147,8 +10245,9 @@ func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { c.urlParams_.Set("projection", projection) return c @@ -10403,9 +10502,9 @@ type ObjectsPatchCall struct { // Patch: Patches an object's metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10462,16 +10561,26 @@ func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -10482,8 +10591,9 @@ func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.urlParams_.Set("projection", projection) return c @@ -10710,18 +10820,18 @@ type ObjectsRewriteCall struct { // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. 
Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts. func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket @@ -10736,7 +10846,9 @@ func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, desti // "destinationKmsKeyName": Resource name of the Cloud KMS key, of the // form // projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object +// +// that will be used to encrypt the object. Overrides the object +// // metadata's kms_key_name value, if any. func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) @@ -10748,16 +10860,26 @@ func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) // to the destination object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) @@ -10855,8 +10977,9 @@ func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall i // specifies the acl property, when it defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. 
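The Rewrite call whose documentation is reformatted here is usually driven in a loop until the response reports Done, carrying RewriteToken between attempts. A sketch under the assumption of placeholder bucket, object, and KMS key names:

	package main

	import (
		"context"
		"log"

		storage "google.golang.org/api/storage/v1"
	)

	func main() {
		ctx := context.Background()
		svc, err := storage.NewService(ctx)
		if err != nil {
			log.Fatal(err)
		}

		// Rewrite may need several calls for large objects; RewriteToken from
		// the previous response carries progress into the next call.
		call := svc.Objects.Rewrite("src-bucket", "src.txt", "dst-bucket", "dst.txt", &storage.Object{}).
			DestinationKmsKeyName("projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key").
			Context(ctx)

		for {
			resp, err := call.Do()
			if err != nil {
				log.Fatal(err)
			}
			if resp.Done {
				log.Printf("rewrote %d bytes", resp.TotalBytesRewritten)
				break
			}
			call = call.RewriteToken(resp.RewriteToken)
		}
	}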
+// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.urlParams_.Set("projection", projection) return c @@ -11156,9 +11279,9 @@ type ObjectsSetIamPolicyCall struct { // SetIamPolicy: Updates an IAM policy for the specified object. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11337,10 +11460,10 @@ type ObjectsTestIamPermissionsCall struct { // TestIamPermissions: Tests a set of permissions on the given object to // see which, if any, are held by the caller. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -// - permissions: Permissions to test. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +// - permissions: Permissions to test. func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11533,9 +11656,9 @@ type ObjectsUpdateCall struct { // Update: Updates an object's metadata. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11592,16 +11715,26 @@ func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // predefined set of access controls to this object. // // Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and +// +// "authenticatedRead" - Object owner gets OWNER access, and +// // allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// // project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// // team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team +// +// "private" - Object owner gets OWNER access. 
+// "projectPrivate" - Object owner gets OWNER access, and project team +// // members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// // READER access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) @@ -11612,8 +11745,9 @@ func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCa // properties to return. Defaults to full. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { c.urlParams_.Set("projection", projection) return c @@ -11902,8 +12036,9 @@ func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { // properties to return. Defaults to noAcl. // // Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { c.urlParams_.Set("projection", projection) return c @@ -12412,9 +12547,9 @@ type ProjectsHmacKeysGetCall struct { // Get: Retrieves an HMAC key's metadata // -// - accessId: Name of the HMAC key. -// - projectId: Project ID owning the service account of the requested -// key. +// - accessId: Name of the HMAC key. +// - projectId: Project ID owning the service account of the requested +// key. func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -12822,9 +12957,9 @@ type ProjectsHmacKeysUpdateCall struct { // Update: Updates the state of an HMAC key. See the HMAC Key resource // descriptor for valid states. // -// - accessId: Name of the HMAC key being updated. -// - projectId: Project ID owning the service account of the updated -// key. +// - accessId: Name of the HMAC key being updated. +// - projectId: Project ID owning the service account of the updated +// key. func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go index 071586e9446..78004f0475f 100644 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go @@ -6,17 +6,17 @@ // Authentication according to https://google.aip.dev/auth/4114 // // The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. 
+// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. // // Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. // // We would like to avoid introducing client-side logic that parses whether the // endpoint override is an mTLS url, since the url pattern may change at anytime. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 4f34ab73cba..6f11b7c500f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -127,19 +127,19 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } // // This enables an HTTP REST to gRPC mapping as below: // @@ -151,21 +151,21 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // automatically become HTTP query parameters if there is no HTTP request body. // For example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } // // This enables a HTTP JSON to RPC mapping as below: // @@ -186,18 +186,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specifies the mapping. 
Consider a REST update method on the // message resource collection: // -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } // // The following HTTP JSON to RPC mapping is enabled, where the // representation of the JSON in the request body is determined by @@ -213,19 +213,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body. This enables the following alternative definition of // the update method: // -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } // // The following HTTP JSON to RPC mapping is enabled: // @@ -243,20 +242,20 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // It is possible to define multiple HTTP methods for one RPC by using // the `additional_bindings` option. Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } // // This enables the following two alternative HTTP JSON to RPC mappings: // @@ -268,15 +267,15 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ## Rules for HTTP mapping // -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. 
They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. // 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields // are passed via URL path and HTTP request body. // 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all @@ -284,12 +283,12 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ### Path template syntax // -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; // // The syntax `*` matches a single URL path segment. The syntax `**` matches // zero or more URL path segments, which must be the last part of the URL path @@ -338,11 +337,11 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} // // ## Special notes // diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 6515668d34f..13ea54b2940 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -157,45 +157,45 @@ func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { // // Example: // -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" // // Sometimes, resources have multiple patterns, typically because they can // live under multiple parents. 
// // Example: // -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" type ResourceDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index dd45cf6e6c1..6707a7b1c1d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -44,71 +44,71 @@ const ( // // Message Definition: // -// message Request { -// // The name of the Table -// // Values can be of the following formats: -// // - `projects//tables/` -// // - `projects//instances//tables/
` -// // - `region//zones//tables/
` -// string table_name = 1; -// -// // This value specifies routing for replication. -// // It can be in the following formats: -// // - `profiles/` -// // - a legacy `profile_id` that can be any string -// string app_profile_id = 2; -// } +// message Request { +// // The name of the Table +// // Values can be of the following formats: +// // - `projects//tables/
` +// // - `projects//instances//tables/
` +// // - `region//zones//tables/
` +// string table_name = 1; +// +// // This value specifies routing for replication. +// // It can be in the following formats: +// // - `profiles/` +// // - a legacy `profile_id` that can be any string +// string app_profile_id = 2; +// } // // Example message: // -// { -// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, -// app_profile_id: profiles/prof_qux -// } +// { +// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +// app_profile_id: profiles/prof_qux +// } // // The routing header consists of one or multiple key-value pairs. Every key // and value must be percent-encoded, and joined together in the format of // `key1=value1&key2=value2`. // In the examples below I am skipping the percent-encoding for readablity. // -// Example 1 +// # Example 1 // // Extracting a field from the request to put into the routing header // unchanged, with the key equal to the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`. -// routing_parameters { -// field: "app_profile_id" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`. +// routing_parameters { +// field: "app_profile_id" +// } +// }; // // result: // -// x-goog-request-params: app_profile_id=profiles/prof_qux +// x-goog-request-params: app_profile_id=profiles/prof_qux // -// Example 2 +// # Example 2 // // Extracting a field from the request to put into the routing header // unchanged, with the key different from the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`, but name it `routing_id` in the header. -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`, but name it `routing_id` in the header. +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 3 +// # Example 3 // // Extracting a field from the request to put into the routing // header, while matching a path template syntax on the field's value. @@ -116,91 +116,91 @@ const ( // NB: it is more useful to send nothing than to send garbage for the purpose // of dynamic routing, since garbage pollutes cache. Thus the matching. // -// Sub-example 3a +// # Sub-example 3a // // The field matches the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with project-based -// // syntax). -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with project-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Sub-example 3b +// # Sub-example 3b // // The field does not match the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with region-based -// // syntax). 
-// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with region-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// }; // // result: // -// +// // -// Sub-example 3c +// # Sub-example 3c // // Multiple alternative conflictingly named path templates are // specified. The one that matches is used to construct the header. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed, whether -// // using the region- or projects-based syntax. +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed, whether +// // using the region- or projects-based syntax. // -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Example 4 +// # Example 4 // // Extracting a single routing header key-value pair by matching a // template syntax on (a part of) a single request field. // // annotation: // -// option (google.api.routing) = { -// // Take just the project id from the `table_name` field. -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// }; +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; // // result: // -// x-goog-request-params: routing_id=projects/proj_foo +// x-goog-request-params: routing_id=projects/proj_foo // -// Example 5 +// # Example 5 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on (parts of) a single request @@ -208,87 +208,87 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // If the `table_name` does not have instances information, -// // take just the project id for routing. -// // Otherwise take project + instance. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*/instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. 
+// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// routing_id=projects/proj_foo/instances/instance_bar +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar // -// Example 6 +// # Example 6 // // Extracting multiple routing header key-value pairs by matching // several non-conflicting path templates on (parts of) a single request field. // -// Sub-example 6a +// # Sub-example 6a // // Make the templates strict, so that if the `table_name` does not // have an instance information, nothing is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code needs two keys instead of one composite -// // but works only for the tables with the "project-instance" name -// // syntax. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/instances/*/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Sub-example 6b +// # Sub-example 6b // // Make the templates loose, so that if the `table_name` does not // have an instance information, just the project id part is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code wants two keys instead of one composite -// // but will work with just the `project_id` for tables without -// // an instance in the `table_name`. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result (is the same as 6a for our example message because it has the instance // information): // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Example 7 +// # Example 7 // // Extracting multiple routing header key-value pairs by matching // several path templates on multiple request fields. 
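Aside (not part of the patch): every routing example above ends with an `x-goog-request-params` header whose key/value pairs are percent-encoded and joined with `&`. A small sketch of that final assembly step, assuming the `path_template` matching has already produced the key/value pairs; the helper name is invented and only `net/url` is used.

```go
package routingheader

import "net/url"

// BuildRequestParams percent-encodes each key and value and joins the pairs
// with '&', producing the x-goog-request-params header value. The input is
// assumed to come from the routing_parameters matching described above,
// e.g. {"routing_id": "profiles/prof_qux"}.
func BuildRequestParams(params map[string]string) string {
	v := url.Values{}
	for k, val := range params {
		v.Set(k, val)
	}
	// url.Values.Encode sorts keys and percent-encodes both keys and values,
	// so "profiles/prof_qux" becomes "profiles%2Fprof_qux" on the wire.
	return v.Encode()
}
```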
@@ -301,26 +301,26 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The routing needs both `project_id` and `routing_id` -// // (from the `app_profile_id` field) for routing. +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. // -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&routing_id=profiles/prof_qux +// x-goog-request-params: +// project_id=projects/proj_foo&routing_id=profiles/prof_qux // -// Example 8 +// # Example 8 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on several request fields. The @@ -328,73 +328,73 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The `routing_id` can be a project id or a region id depending on -// // the table name format, but only if the `app_profile_id` is not set. -// // If `app_profile_id` is set it should be used instead. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=regions/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // The `routing_id` can be a project id or a region id depending on +// // the table name format, but only if the `app_profile_id` is not set. +// // If `app_profile_id` is set it should be used instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=regions/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 9 +// # Example 9 // // Bringing it all together. // // annotation: // -// option (google.api.routing) = { -// // For routing both `table_location` and a `routing_id` are needed. -// // -// // table_location can be either an instance id or a region+zone id. -// // -// // For `routing_id`, take the value of `app_profile_id` -// // - If it's in the format `profiles/`, send -// // just the `` part. -// // - If it's any other literal, send it as is. -// // If the `app_profile_id` is empty, and the `table_name` starts with -// // the project_id, send that instead. 
-// -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{table_location=instances/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_location=regions/*/zones/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "profiles/{routing_id=*}" -// } -// }; +// option (google.api.routing) = { +// // For routing both `table_location` and a `routing_id` are needed. +// // +// // table_location can be either an instance id or a region+zone id. +// // +// // For `routing_id`, take the value of `app_profile_id` +// // - If it's in the format `profiles/`, send +// // just the `` part. +// // - If it's any other literal, send it as is. +// // If the `app_profile_id` is empty, and the `table_name` starts with +// // the project_id, send that instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{table_location=instances/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_location=regions/*/zones/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "profiles/{routing_id=*}" +// } +// }; // // result: // -// x-goog-request-params: -// table_location=instances/instance_bar&routing_id=prof_qux +// x-goog-request-params: +// table_location=instances/instance_bar&routing_id=prof_qux type RoutingRule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 7ea5ced87e0..af72196c809 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -40,7 +40,6 @@ const ( // payload formats that can't be represented as JSON, such as raw binary or // an HTML page. // -// // This message can be used both in streaming and non-streaming API methods in // the request as well as the response. // @@ -50,32 +49,32 @@ const ( // // Example: // -// message GetResourceRequest { -// // A unique request id. -// string request_id = 1; +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; // -// // The raw HTTP body is bound to this field. -// google.api.HttpBody http_body = 2; +// // The raw HTTP body is bound to this field. 
+// google.api.HttpBody http_body = 2; // -// } +// } // -// service ResourceService { -// rpc GetResource(GetResourceRequest) -// returns (google.api.HttpBody); -// rpc UpdateResource(google.api.HttpBody) -// returns (google.protobuf.Empty); +// service ResourceService { +// rpc GetResource(GetResourceRequest) +// returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) +// returns (google.protobuf.Empty); // -// } +// } // // Example with streaming methods: // -// service CaldavService { -// rpc GetCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// rpc UpdateCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); // -// } +// } // // Use of this type only changes how the request and response bodies are // handled, all other features will continue to work unchanged. diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go index bedd5f24336..5869d92070e 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -203,7 +203,6 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // An Identity and Access Management (IAM) policy, which specifies access // controls for Google Cloud resources. // -// // A `Policy` is a collection of `bindings`. A `binding` binds one or more // `members`, or principals, to a single `role`. Principals can be user // accounts, service accounts, Google groups, and domains (such as G Suite). 
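Aside (not part of the patch): the `HttpBody` comments reformatted above show the proto side of returning a raw payload. A hedged Go sketch of the corresponding server method follows; the request type and server struct are made-up stand-ins for the example's `GetResourceRequest` and `ResourceService`.

```go
package rawbody

import (
	"context"

	"google.golang.org/genproto/googleapis/api/httpbody"
)

// GetResourceRequest mirrors the request message in the example above;
// the field is illustrative only.
type GetResourceRequest struct {
	RequestId string
}

type resourceServer struct{}

// GetResource returns a raw (non-JSON) response: ContentType and Data are
// the HttpBody fields that carry the payload unchanged to the client.
func (resourceServer) GetResource(ctx context.Context, req *GetResourceRequest) (*httpbody.HttpBody, error) {
	return &httpbody.HttpBody{
		ContentType: "text/html; charset=utf-8",
		Data:        []byte("<html><body>resource " + req.RequestId + "</body></html>"),
	}, nil
}
```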
A @@ -219,51 +218,51 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // // **JSON example:** // -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } +// { +// "bindings": [ +// { +// "role": "roles/resourcemanager.organizationAdmin", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" +// ] +// }, +// { +// "role": "roles/resourcemanager.organizationViewer", +// "members": [ +// "user:eve@example.com" +// ], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// } +// } +// ], +// "etag": "BwWWja0YfJA=", +// "version": 3 +// } // // **YAML example:** // -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') -// etag: BwWWja0YfJA= -// version: 3 +// bindings: +// - members: +// - user:mike@example.com +// - group:admins@example.com +// - domain:google.com +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin +// - members: +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') +// etag: BwWWja0YfJA= +// version: 3 // // For a description of IAM and its features, see the // [IAM documentation](https://cloud.google.com/iam/docs/). 
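Aside (not part of the patch): the JSON and YAML policy examples above translate directly into the generated Go types from this same vendored package. A sketch building the equivalent `Policy` value; the etag is omitted because the JSON form shows it base64-encoded.

```go
package iampolicy

import (
	iampb "google.golang.org/genproto/googleapis/iam/v1"
	exprpb "google.golang.org/genproto/googleapis/type/expr"
)

// examplePolicy mirrors the JSON/YAML policy shown above: one unconditional
// binding and one binding guarded by a CEL expression.
var examplePolicy = &iampb.Policy{
	Version: 3,
	Bindings: []*iampb.Binding{
		{
			Role: "roles/resourcemanager.organizationAdmin",
			Members: []string{
				"user:mike@example.com",
				"group:admins@example.com",
				"domain:google.com",
				"serviceAccount:my-project-id@appspot.gserviceaccount.com",
			},
		},
		{
			Role:    "roles/resourcemanager.organizationViewer",
			Members: []string{"user:eve@example.com"},
			Condition: &exprpb.Expr{
				Title:       "expirable access",
				Description: "Does not grant access after Sep 2020",
				Expression:  "request.time < timestamp('2020-10-01T00:00:00.000Z')",
			},
		},
	},
}
```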
@@ -519,41 +518,41 @@ func (x *Binding) GetCondition() *expr.Expr { // // Example Policy with multiple AuditConfigs: // -// { -// "audit_configs": [ -// { -// "service": "allServices", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// }, -// { -// "log_type": "ADMIN_READ" -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ" -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } +// { +// "audit_configs": [ +// { +// "service": "allServices", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// }, +// { +// "log_type": "ADMIN_READ" +// } +// ] +// }, +// { +// "service": "sampleservice.googleapis.com", +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ" +// }, +// { +// "log_type": "DATA_WRITE", +// "exempted_members": [ +// "user:aliya@example.com" +// ] +// } +// ] +// } +// ] +// } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ // logging. It also exempts jose@example.com from DATA_READ logging, and @@ -620,19 +619,19 @@ func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { // Provides the configuration for logging a type of permissions. // Example: // -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// } -// ] -// } +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:jose@example.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE" +// } +// ] +// } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting // jose@example.com from DATA_READ logging. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 1258803152f..3a47b902c9a 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -37,7 +37,6 @@ const ( // The canonical error codes for gRPC APIs. // -// // Sometimes multiple error codes may apply. Services should return // the most specific error code that applies. For example, prefer // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. 
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 1c7b93ec160..2f3ab924948 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -219,25 +219,25 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { // Example of an error when contacting the "pubsub.googleapis.com" API when it // is not enabled: // -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } // // This response indicates that the pubsub.googleapis.com API is not enabled. // // Example of an error that is returned when attempting to create a Spanner // instance in a region that is out of stock: // -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } type ErrorInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go b/vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go deleted file mode 100644 index e4570e45202..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/color/color.pb.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/color.proto - -package color - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Represents a color in the RGBA color space. This representation is designed -// for simplicity of conversion to/from color representations in various -// languages over compactness. 
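Aside (not part of the patch): the `ErrorInfo` examples above are normally carried as gRPC status details. A sketch of attaching the first example to a status, assuming the usual `google.golang.org/grpc/status` API; the choice of `codes.PermissionDenied` and the function name are illustrative.

```go
package apierrors

import (
	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// apiDisabledError wraps the "API_DISABLED" ErrorInfo from the example
// above into a gRPC status detail.
func apiDisabledError() error {
	st := status.New(codes.PermissionDenied, "pubsub.googleapis.com is not enabled")
	withInfo, err := st.WithDetails(&errdetails.ErrorInfo{
		Reason: "API_DISABLED",
		Domain: "googleapis.com",
		Metadata: map[string]string{
			"resource": "projects/123",
			"service":  "pubsub.googleapis.com",
		},
	})
	if err != nil {
		// Fall back to the bare status if the detail cannot be attached.
		return st.Err()
	}
	return withInfo.Err()
}
```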
For example, the fields of this representation -// can be trivially provided to the constructor of `java.awt.Color` in Java; it -// can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` -// method in iOS; and, with just a little work, it can be easily formatted into -// a CSS `rgba()` string in JavaScript. -// -// This reference page doesn't carry information about the absolute color -// space -// that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB, -// DCI-P3, BT.2020, etc.). By default, applications should assume the sRGB color -// space. -// -// When color equality needs to be decided, implementations, unless -// documented otherwise, treat two colors as equal if all their red, -// green, blue, and alpha values each differ by at most 1e-5. -// -// Example (Java): -// -// import com.google.type.Color; -// -// // ... -// public static java.awt.Color fromProto(Color protocolor) { -// float alpha = protocolor.hasAlpha() -// ? protocolor.getAlpha().getValue() -// : 1.0; -// -// return new java.awt.Color( -// protocolor.getRed(), -// protocolor.getGreen(), -// protocolor.getBlue(), -// alpha); -// } -// -// public static Color toProto(java.awt.Color color) { -// float red = (float) color.getRed(); -// float green = (float) color.getGreen(); -// float blue = (float) color.getBlue(); -// float denominator = 255.0; -// Color.Builder resultBuilder = -// Color -// .newBuilder() -// .setRed(red / denominator) -// .setGreen(green / denominator) -// .setBlue(blue / denominator); -// int alpha = color.getAlpha(); -// if (alpha != 255) { -// result.setAlpha( -// FloatValue -// .newBuilder() -// .setValue(((float) alpha) / denominator) -// .build()); -// } -// return resultBuilder.build(); -// } -// // ... -// -// Example (iOS / Obj-C): -// -// // ... -// static UIColor* fromProto(Color* protocolor) { -// float red = [protocolor red]; -// float green = [protocolor green]; -// float blue = [protocolor blue]; -// FloatValue* alpha_wrapper = [protocolor alpha]; -// float alpha = 1.0; -// if (alpha_wrapper != nil) { -// alpha = [alpha_wrapper value]; -// } -// return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; -// } -// -// static Color* toProto(UIColor* color) { -// CGFloat red, green, blue, alpha; -// if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { -// return nil; -// } -// Color* result = [[Color alloc] init]; -// [result setRed:red]; -// [result setGreen:green]; -// [result setBlue:blue]; -// if (alpha <= 0.9999) { -// [result setAlpha:floatWrapperWithValue(alpha)]; -// } -// [result autorelease]; -// return result; -// } -// // ... -// -// Example (JavaScript): -// -// // ... 
-// -// var protoToCssColor = function(rgb_color) { -// var redFrac = rgb_color.red || 0.0; -// var greenFrac = rgb_color.green || 0.0; -// var blueFrac = rgb_color.blue || 0.0; -// var red = Math.floor(redFrac * 255); -// var green = Math.floor(greenFrac * 255); -// var blue = Math.floor(blueFrac * 255); -// -// if (!('alpha' in rgb_color)) { -// return rgbToCssColor(red, green, blue); -// } -// -// var alphaFrac = rgb_color.alpha.value || 0.0; -// var rgbParams = [red, green, blue].join(','); -// return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); -// }; -// -// var rgbToCssColor = function(red, green, blue) { -// var rgbNumber = new Number((red << 16) | (green << 8) | blue); -// var hexString = rgbNumber.toString(16); -// var missingZeros = 6 - hexString.length; -// var resultBuilder = ['#']; -// for (var i = 0; i < missingZeros; i++) { -// resultBuilder.push('0'); -// } -// resultBuilder.push(hexString); -// return resultBuilder.join(''); -// }; -// -// // ... -type Color struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The amount of red in the color as a value in the interval [0, 1]. - Red float32 `protobuf:"fixed32,1,opt,name=red,proto3" json:"red,omitempty"` - // The amount of green in the color as a value in the interval [0, 1]. - Green float32 `protobuf:"fixed32,2,opt,name=green,proto3" json:"green,omitempty"` - // The amount of blue in the color as a value in the interval [0, 1]. - Blue float32 `protobuf:"fixed32,3,opt,name=blue,proto3" json:"blue,omitempty"` - // The fraction of this color that should be applied to the pixel. That is, - // the final pixel color is defined by the equation: - // - // `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` - // - // This means that a value of 1.0 corresponds to a solid color, whereas - // a value of 0.0 corresponds to a completely transparent color. This - // uses a wrapper message rather than a simple float scalar so that it is - // possible to distinguish between a default value and the value being unset. - // If omitted, this color object is rendered as a solid color - // (as if the alpha value had been explicitly given a value of 1.0). - Alpha *wrapperspb.FloatValue `protobuf:"bytes,4,opt,name=alpha,proto3" json:"alpha,omitempty"` -} - -func (x *Color) Reset() { - *x = Color{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_color_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Color) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Color) ProtoMessage() {} - -func (x *Color) ProtoReflect() protoreflect.Message { - mi := &file_google_type_color_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Color.ProtoReflect.Descriptor instead. 
-func (*Color) Descriptor() ([]byte, []int) { - return file_google_type_color_proto_rawDescGZIP(), []int{0} -} - -func (x *Color) GetRed() float32 { - if x != nil { - return x.Red - } - return 0 -} - -func (x *Color) GetGreen() float32 { - if x != nil { - return x.Green - } - return 0 -} - -func (x *Color) GetBlue() float32 { - if x != nil { - return x.Blue - } - return 0 -} - -func (x *Color) GetAlpha() *wrapperspb.FloatValue { - if x != nil { - return x.Alpha - } - return nil -} - -var File_google_type_color_proto protoreflect.FileDescriptor - -var file_google_type_color_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x6f, - 0x6c, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76, 0x0a, 0x05, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x12, - 0x10, 0x0a, 0x03, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, 0x72, 0x65, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x72, 0x65, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, - 0x52, 0x05, 0x67, 0x72, 0x65, 0x65, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6c, 0x75, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x62, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, - 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x60, - 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x42, 0x0a, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x63, 0x6f, 0x6c, 0x6f, - 0x72, 0x3b, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_color_proto_rawDescOnce sync.Once - file_google_type_color_proto_rawDescData = file_google_type_color_proto_rawDesc -) - -func file_google_type_color_proto_rawDescGZIP() []byte { - file_google_type_color_proto_rawDescOnce.Do(func() { - file_google_type_color_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_color_proto_rawDescData) - }) - return file_google_type_color_proto_rawDescData -} - -var file_google_type_color_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_type_color_proto_goTypes = []interface{}{ - (*Color)(nil), // 0: google.type.Color - (*wrapperspb.FloatValue)(nil), // 1: google.protobuf.FloatValue -} -var file_google_type_color_proto_depIdxs = []int32{ - 1, // 0: google.type.Color.alpha:type_name -> google.protobuf.FloatValue - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_type_color_proto_init() } 
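Aside (not part of the patch): this change removes the now-unused vendor copy of `google.type.Color`, whose deleted comment above walks through Java, Objective-C and JavaScript conversions. For reference, the same conversion written in Go against the upstream type, using the field accessors shown above; the helper name is made up.

```go
package protocolor

import (
	"image/color"

	colorpb "google.golang.org/genproto/googleapis/type/color"
)

// ToRGBA converts a proto Color (float channels in [0, 1], optional alpha
// wrapper that defaults to fully opaque) into an 8-bit image/color.RGBA.
func ToRGBA(c *colorpb.Color) color.RGBA {
	alpha := float32(1.0)
	if c.GetAlpha() != nil {
		alpha = c.GetAlpha().GetValue()
	}
	return color.RGBA{
		R: uint8(c.GetRed() * 255),
		G: uint8(c.GetGreen() * 255),
		B: uint8(c.GetBlue() * 255),
		A: uint8(alpha * 255),
	}
}
```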
-func file_google_type_color_proto_init() { - if File_google_type_color_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_type_color_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Color); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_color_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_color_proto_goTypes, - DependencyIndexes: file_google_type_color_proto_depIdxs, - MessageInfos: file_google_type_color_proto_msgTypes, - }.Build() - File_google_type_color_proto = out.File - file_google_type_color_proto_rawDesc = nil - file_google_type_color_proto_goTypes = nil - file_google_type_color_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/datetime/datetime.pb.go b/vendor/google.golang.org/genproto/googleapis/type/datetime/datetime.pb.go deleted file mode 100644 index bc417cce867..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/datetime/datetime.pb.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/datetime.proto - -package datetime - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Represents civil time (or occasionally physical time). -// -// This type can represent a civil time in one of a few possible ways: -// -// * When utc_offset is set and time_zone is unset: a civil time on a calendar -// day with a particular offset from UTC. -// * When time_zone is set and utc_offset is unset: a civil time on a calendar -// day in a particular time zone. -// * When neither time_zone nor utc_offset is set: a civil time on a calendar -// day in local time. -// -// The date is relative to the Proleptic Gregorian Calendar. -// -// If year is 0, the DateTime is considered not to have a specific year. month -// and day must have valid, non-zero values. -// -// This type may also be used to represent a physical time if all the date and -// time fields are set and either case of the `time_offset` oneof is set. -// Consider using `Timestamp` message for physical time instead. 
If your use -// case also would like to store the user's timezone, that can be done in -// another field. -// -// This type is more flexible than some applications may want. Make sure to -// document and validate your application's limitations. -type DateTime struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a - // datetime without a year. - Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"` - // Required. Month of year. Must be from 1 to 12. - Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"` - // Required. Day of month. Must be from 1 to 31 and valid for the year and - // month. - Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"` - // Required. Hours of day in 24 hour format. Should be from 0 to 23. An API - // may choose to allow the value "24:00:00" for scenarios like business - // closing time. - Hours int32 `protobuf:"varint,4,opt,name=hours,proto3" json:"hours,omitempty"` - // Required. Minutes of hour of day. Must be from 0 to 59. - Minutes int32 `protobuf:"varint,5,opt,name=minutes,proto3" json:"minutes,omitempty"` - // Required. Seconds of minutes of the time. Must normally be from 0 to 59. An - // API may allow the value 60 if it allows leap-seconds. - Seconds int32 `protobuf:"varint,6,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Required. Fractions of seconds in nanoseconds. Must be from 0 to - // 999,999,999. - Nanos int32 `protobuf:"varint,7,opt,name=nanos,proto3" json:"nanos,omitempty"` - // Optional. Specifies either the UTC offset or the time zone of the DateTime. - // Choose carefully between them, considering that time zone data may change - // in the future (for example, a country modifies their DST start/end dates, - // and future DateTimes in the affected range had already been stored). - // If omitted, the DateTime is considered to be in local time. - // - // Types that are assignable to TimeOffset: - // *DateTime_UtcOffset - // *DateTime_TimeZone - TimeOffset isDateTime_TimeOffset `protobuf_oneof:"time_offset"` -} - -func (x *DateTime) Reset() { - *x = DateTime{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_datetime_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DateTime) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DateTime) ProtoMessage() {} - -func (x *DateTime) ProtoReflect() protoreflect.Message { - mi := &file_google_type_datetime_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DateTime.ProtoReflect.Descriptor instead. 
-func (*DateTime) Descriptor() ([]byte, []int) { - return file_google_type_datetime_proto_rawDescGZIP(), []int{0} -} - -func (x *DateTime) GetYear() int32 { - if x != nil { - return x.Year - } - return 0 -} - -func (x *DateTime) GetMonth() int32 { - if x != nil { - return x.Month - } - return 0 -} - -func (x *DateTime) GetDay() int32 { - if x != nil { - return x.Day - } - return 0 -} - -func (x *DateTime) GetHours() int32 { - if x != nil { - return x.Hours - } - return 0 -} - -func (x *DateTime) GetMinutes() int32 { - if x != nil { - return x.Minutes - } - return 0 -} - -func (x *DateTime) GetSeconds() int32 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *DateTime) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -func (m *DateTime) GetTimeOffset() isDateTime_TimeOffset { - if m != nil { - return m.TimeOffset - } - return nil -} - -func (x *DateTime) GetUtcOffset() *durationpb.Duration { - if x, ok := x.GetTimeOffset().(*DateTime_UtcOffset); ok { - return x.UtcOffset - } - return nil -} - -func (x *DateTime) GetTimeZone() *TimeZone { - if x, ok := x.GetTimeOffset().(*DateTime_TimeZone); ok { - return x.TimeZone - } - return nil -} - -type isDateTime_TimeOffset interface { - isDateTime_TimeOffset() -} - -type DateTime_UtcOffset struct { - // UTC offset. Must be whole seconds, between -18 hours and +18 hours. - // For example, a UTC offset of -4:00 would be represented as - // { seconds: -14400 }. - UtcOffset *durationpb.Duration `protobuf:"bytes,8,opt,name=utc_offset,json=utcOffset,proto3,oneof"` -} - -type DateTime_TimeZone struct { - // Time zone. - TimeZone *TimeZone `protobuf:"bytes,9,opt,name=time_zone,json=timeZone,proto3,oneof"` -} - -func (*DateTime_UtcOffset) isDateTime_TimeOffset() {} - -func (*DateTime_TimeZone) isDateTime_TimeOffset() {} - -// Represents a time zone from the -// [IANA Time Zone Database](https://www.iana.org/time-zones). -type TimeZone struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // IANA Time Zone Database time zone, e.g. "America/New_York". - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Optional. IANA Time Zone Database version number, e.g. "2019a". - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *TimeZone) Reset() { - *x = TimeZone{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_datetime_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeZone) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeZone) ProtoMessage() {} - -func (x *TimeZone) ProtoReflect() protoreflect.Message { - mi := &file_google_type_datetime_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeZone.ProtoReflect.Descriptor instead. 
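Aside (not part of the patch): the vendored `google.type.DateTime`, also removed by this change, is most often flattened into a `time.Time`. A sketch handling just the `time_zone` branch of the oneof, using the generated accessors (GetYear, GetTimeZone, TimeZone.GetId, and so on); the helper name is invented.

```go
package protodatetime

import (
	"fmt"
	"time"

	datetimepb "google.golang.org/genproto/googleapis/type/datetime"
)

// ToTime converts a DateTime that carries an IANA time zone into a
// time.Time; the utc_offset branch of the oneof is not handled here.
func ToTime(dt *datetimepb.DateTime) (time.Time, error) {
	tz := dt.GetTimeZone()
	if tz == nil {
		return time.Time{}, fmt.Errorf("datetime: only the time_zone case is handled in this sketch")
	}
	loc, err := time.LoadLocation(tz.GetId())
	if err != nil {
		return time.Time{}, err
	}
	return time.Date(
		int(dt.GetYear()), time.Month(dt.GetMonth()), int(dt.GetDay()),
		int(dt.GetHours()), int(dt.GetMinutes()), int(dt.GetSeconds()),
		int(dt.GetNanos()), loc,
	), nil
}
```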
-func (*TimeZone) Descriptor() ([]byte, []int) { - return file_google_type_datetime_proto_rawDescGZIP(), []int{1} -} - -func (x *TimeZone) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *TimeZone) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -var File_google_type_datetime_proto protoreflect.FileDescriptor - -var file_google_type_datetime_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, - 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, 0x02, 0x0a, 0x08, 0x44, 0x61, - 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, - 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, - 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, - 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x68, 0x6f, 0x75, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x68, 0x6f, 0x75, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x75, 0x74, 0x63, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x09, 0x75, 0x74, 0x63, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x34, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x48, 0x00, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, - 0x5a, 0x6f, 0x6e, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x22, 0x34, 0x0a, 0x08, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0d, 0x44, 0x61, - 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 
0x74, 0x69, - 0x6d, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_datetime_proto_rawDescOnce sync.Once - file_google_type_datetime_proto_rawDescData = file_google_type_datetime_proto_rawDesc -) - -func file_google_type_datetime_proto_rawDescGZIP() []byte { - file_google_type_datetime_proto_rawDescOnce.Do(func() { - file_google_type_datetime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_datetime_proto_rawDescData) - }) - return file_google_type_datetime_proto_rawDescData -} - -var file_google_type_datetime_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_google_type_datetime_proto_goTypes = []interface{}{ - (*DateTime)(nil), // 0: google.type.DateTime - (*TimeZone)(nil), // 1: google.type.TimeZone - (*durationpb.Duration)(nil), // 2: google.protobuf.Duration -} -var file_google_type_datetime_proto_depIdxs = []int32{ - 2, // 0: google.type.DateTime.utc_offset:type_name -> google.protobuf.Duration - 1, // 1: google.type.DateTime.time_zone:type_name -> google.type.TimeZone - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_google_type_datetime_proto_init() } -func file_google_type_datetime_proto_init() { - if File_google_type_datetime_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_type_datetime_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DateTime); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_type_datetime_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeZone); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_type_datetime_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*DateTime_UtcOffset)(nil), - (*DateTime_TimeZone)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_datetime_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_datetime_proto_goTypes, - DependencyIndexes: file_google_type_datetime_proto_depIdxs, - MessageInfos: file_google_type_datetime_proto_msgTypes, - }.Build() - File_google_type_datetime_proto = out.File - file_google_type_datetime_proto_rawDesc = nil - file_google_type_datetime_proto_goTypes = nil - file_google_type_datetime_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/decimal/decimal.pb.go b/vendor/google.golang.org/genproto/googleapis/type/decimal/decimal.pb.go deleted file mode 100644 index cd96d3f9890..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/decimal/decimal.pb.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/decimal.proto - -package decimal - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A representation of a decimal value, such as 2.5. Clients may convert values -// into language-native decimal formats, such as Java's [BigDecimal][] or -// Python's [decimal.Decimal][]. -// -// [BigDecimal]: -// https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html -// [decimal.Decimal]: https://docs.python.org/3/library/decimal.html -type Decimal struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The decimal value, as a string. - // - // The string representation consists of an optional sign, `+` (`U+002B`) - // or `-` (`U+002D`), followed by a sequence of zero or more decimal digits - // ("the integer"), optionally followed by a fraction, optionally followed - // by an exponent. - // - // The fraction consists of a decimal point followed by zero or more decimal - // digits. The string must contain at least one digit in either the integer - // or the fraction. The number formed by the sign, the integer and the - // fraction is referred to as the significand. - // - // The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) - // followed by one or more decimal digits. - // - // Services **should** normalize decimal values before storing them by: - // - // - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - // - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - // - Coercing the exponent character to lower-case (`2.5E8` -> `2.5e8`). - // - Removing an explicitly-provided zero exponent (`2.5e0` -> `2.5`). - // - // Services **may** perform additional normalization based on its own needs - // and the internal decimal implementation selected, such as shifting the - // decimal point and exponent value together (example: `2.5e-1` <-> `0.25`). - // Additionally, services **may** preserve trailing zeroes in the fraction - // to indicate increased precision, but are not required to do so. - // - // Note that only the `.` character is supported to divide the integer - // and the fraction; `,` **should not** be supported regardless of locale. - // Additionally, thousand separators **should not** be supported. If a - // service does support them, values **must** be normalized. - // - // The ENBF grammar is: - // - // DecimalString = - // [Sign] Significand [Exponent]; - // - // Sign = '+' | '-'; - // - // Significand = - // Digits ['.'] [Digits] | [Digits] '.' 
Digits; - // - // Exponent = ('e' | 'E') [Sign] Digits; - // - // Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; - // - // Services **should** clearly document the range of supported values, the - // maximum supported precision (total number of digits), and, if applicable, - // the scale (number of digits after the decimal point), as well as how it - // behaves when receiving out-of-bounds values. - // - // Services **may** choose to accept values passed as input even when the - // value has a higher precision or scale than the service supports, and - // **should** round the value to fit the supported scale. Alternatively, the - // service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) - // if precision would be lost. - // - // Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in - // gRPC) if the service receives a value outside of the supported range. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Decimal) Reset() { - *x = Decimal{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_decimal_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Decimal) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Decimal) ProtoMessage() {} - -func (x *Decimal) ProtoReflect() protoreflect.Message { - mi := &file_google_type_decimal_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Decimal.ProtoReflect.Descriptor instead. -func (*Decimal) Descriptor() ([]byte, []int) { - return file_google_type_decimal_proto_rawDescGZIP(), []int{0} -} - -func (x *Decimal) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -var File_google_type_decimal_proto protoreflect.FileDescriptor - -var file_google_type_decimal_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x65, - 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x69, - 0x6d, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x66, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0c, 0x44, 0x65, - 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, - 0x3b, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, - 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_decimal_proto_rawDescOnce sync.Once - file_google_type_decimal_proto_rawDescData = file_google_type_decimal_proto_rawDesc -) - -func file_google_type_decimal_proto_rawDescGZIP() []byte { - file_google_type_decimal_proto_rawDescOnce.Do(func() { - file_google_type_decimal_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_type_decimal_proto_rawDescData) - }) - return file_google_type_decimal_proto_rawDescData -} - -var file_google_type_decimal_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_type_decimal_proto_goTypes = []interface{}{ - (*Decimal)(nil), // 0: google.type.Decimal -} -var file_google_type_decimal_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_type_decimal_proto_init() } -func file_google_type_decimal_proto_init() { - if File_google_type_decimal_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_type_decimal_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decimal); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_decimal_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_decimal_proto_goTypes, - DependencyIndexes: file_google_type_decimal_proto_depIdxs, - MessageInfos: file_google_type_decimal_proto_msgTypes, - }.Build() - File_google_type_decimal_proto = out.File - file_google_type_decimal_proto_rawDesc = nil - file_google_type_decimal_proto_goTypes = nil - file_google_type_decimal_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 2857e26eb38..38ef56f73ca 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -41,27 +41,27 @@ const ( // // Example (Comparison): // -// title: "Summary size limit" -// description: "Determines if a summary is less than 100 chars" -// expression: "document.summary.size() < 100" +// title: "Summary size limit" +// description: "Determines if a summary is less than 100 chars" +// expression: "document.summary.size() < 100" // // Example (Equality): // -// title: "Requestor is owner" -// description: "Determines if requestor is the document owner" -// expression: "document.owner == request.auth.claims.email" +// title: "Requestor is owner" +// description: "Determines if requestor is the document owner" +// expression: "document.owner == request.auth.claims.email" // // Example (Logic): // -// title: "Public documents" -// description: "Determine whether the document should be publicly visible" -// expression: "document.type != 'private' && document.type != 'internal'" +// title: "Public documents" +// description: "Determine whether the document should be publicly visible" +// expression: "document.type != 'private' && document.type != 'internal'" // // Example (Data Manipulation): // -// title: "Notification string" -// description: "Create a notification string with a timestamp." -// expression: "'New message received at ' + string(document.create_time)" +// title: "Notification string" +// description: "Create a notification string with a timestamp." 
+// expression: "'New message received at ' + string(document.create_time)" // // The exact variables and functions that may be referenced within an expression // are determined by the service that evaluates it. See the service diff --git a/vendor/google.golang.org/genproto/googleapis/type/fraction/fraction.pb.go b/vendor/google.golang.org/genproto/googleapis/type/fraction/fraction.pb.go deleted file mode 100644 index 4db64bc724e..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/fraction/fraction.pb.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/fraction.proto - -package fraction - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Represents a fraction in terms of a numerator divided by a denominator. -type Fraction struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The numerator in the fraction, e.g. 2 in 2/3. - Numerator int64 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"` - // The value by which the numerator is divided, e.g. 3 in 2/3. Must be - // positive. - Denominator int64 `protobuf:"varint,2,opt,name=denominator,proto3" json:"denominator,omitempty"` -} - -func (x *Fraction) Reset() { - *x = Fraction{} - if protoimpl.UnsafeEnabled { - mi := &file_google_type_fraction_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Fraction) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Fraction) ProtoMessage() {} - -func (x *Fraction) ProtoReflect() protoreflect.Message { - mi := &file_google_type_fraction_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Fraction.ProtoReflect.Descriptor instead. 
-func (*Fraction) Descriptor() ([]byte, []int) { - return file_google_type_fraction_proto_rawDescGZIP(), []int{0} -} - -func (x *Fraction) GetNumerator() int64 { - if x != nil { - return x.Numerator - } - return 0 -} - -func (x *Fraction) GetDenominator() int64 { - if x != nil { - return x.Denominator - } - return 0 -} - -var File_google_type_fraction_proto protoreflect.FileDescriptor - -var file_google_type_fraction_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x66, 0x72, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x4a, 0x0a, 0x08, 0x46, 0x72, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x61, - 0x74, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, - 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x66, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0d, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x66, - 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_fraction_proto_rawDescOnce sync.Once - file_google_type_fraction_proto_rawDescData = file_google_type_fraction_proto_rawDesc -) - -func file_google_type_fraction_proto_rawDescGZIP() []byte { - file_google_type_fraction_proto_rawDescOnce.Do(func() { - file_google_type_fraction_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_fraction_proto_rawDescData) - }) - return file_google_type_fraction_proto_rawDescData -} - -var file_google_type_fraction_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_type_fraction_proto_goTypes = []interface{}{ - (*Fraction)(nil), // 0: google.type.Fraction -} -var file_google_type_fraction_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_type_fraction_proto_init() } -func file_google_type_fraction_proto_init() { - if File_google_type_fraction_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_type_fraction_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Fraction); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_fraction_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - 
GoTypes: file_google_type_fraction_proto_goTypes, - DependencyIndexes: file_google_type_fraction_proto_depIdxs, - MessageInfos: file_google_type_fraction_proto_msgTypes, - }.Build() - File_google_type_fraction_proto = out.File - file_google_type_fraction_proto_rawDesc = nil - file_google_type_fraction_proto_goTypes = nil - file_google_type_fraction_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/type/month/month.pb.go b/vendor/google.golang.org/genproto/googleapis/type/month/month.pb.go deleted file mode 100644 index 0c6badd6dee..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/type/month/month.pb.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/type/month.proto - -package month - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Represents a month in the Gregorian calendar. -type Month int32 - -const ( - // The unspecified month. - Month_MONTH_UNSPECIFIED Month = 0 - // The month of January. - Month_JANUARY Month = 1 - // The month of February. - Month_FEBRUARY Month = 2 - // The month of March. - Month_MARCH Month = 3 - // The month of April. - Month_APRIL Month = 4 - // The month of May. - Month_MAY Month = 5 - // The month of June. - Month_JUNE Month = 6 - // The month of July. - Month_JULY Month = 7 - // The month of August. - Month_AUGUST Month = 8 - // The month of September. - Month_SEPTEMBER Month = 9 - // The month of October. - Month_OCTOBER Month = 10 - // The month of November. - Month_NOVEMBER Month = 11 - // The month of December. - Month_DECEMBER Month = 12 -) - -// Enum value maps for Month. 
-var ( - Month_name = map[int32]string{ - 0: "MONTH_UNSPECIFIED", - 1: "JANUARY", - 2: "FEBRUARY", - 3: "MARCH", - 4: "APRIL", - 5: "MAY", - 6: "JUNE", - 7: "JULY", - 8: "AUGUST", - 9: "SEPTEMBER", - 10: "OCTOBER", - 11: "NOVEMBER", - 12: "DECEMBER", - } - Month_value = map[string]int32{ - "MONTH_UNSPECIFIED": 0, - "JANUARY": 1, - "FEBRUARY": 2, - "MARCH": 3, - "APRIL": 4, - "MAY": 5, - "JUNE": 6, - "JULY": 7, - "AUGUST": 8, - "SEPTEMBER": 9, - "OCTOBER": 10, - "NOVEMBER": 11, - "DECEMBER": 12, - } -) - -func (x Month) Enum() *Month { - p := new(Month) - *p = x - return p -} - -func (x Month) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Month) Descriptor() protoreflect.EnumDescriptor { - return file_google_type_month_proto_enumTypes[0].Descriptor() -} - -func (Month) Type() protoreflect.EnumType { - return &file_google_type_month_proto_enumTypes[0] -} - -func (x Month) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Month.Descriptor instead. -func (Month) EnumDescriptor() ([]byte, []int) { - return file_google_type_month_proto_rawDescGZIP(), []int{0} -} - -var File_google_type_month_proto protoreflect.FileDescriptor - -var file_google_type_month_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x6f, - 0x6e, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2a, 0xb0, 0x01, 0x0a, 0x05, 0x4d, 0x6f, 0x6e, 0x74, 0x68, - 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x4e, 0x54, 0x48, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x41, 0x4e, 0x55, 0x41, - 0x52, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x46, 0x45, 0x42, 0x52, 0x55, 0x41, 0x52, 0x59, - 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x41, 0x52, 0x43, 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x50, 0x52, 0x49, 0x4c, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x59, 0x10, - 0x05, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x55, 0x4e, 0x45, 0x10, 0x06, 0x12, 0x08, 0x0a, 0x04, 0x4a, - 0x55, 0x4c, 0x59, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x55, 0x47, 0x55, 0x53, 0x54, 0x10, - 0x08, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x50, 0x54, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x09, - 0x12, 0x0b, 0x0a, 0x07, 0x4f, 0x43, 0x54, 0x4f, 0x42, 0x45, 0x52, 0x10, 0x0a, 0x12, 0x0c, 0x0a, - 0x08, 0x4e, 0x4f, 0x56, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x0c, 0x0a, 0x08, 0x44, - 0x45, 0x43, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x0c, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x0a, 0x4d, 0x6f, - 0x6e, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, - 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x3b, 0x6d, 0x6f, 0x6e, - 0x74, 0x68, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_type_month_proto_rawDescOnce sync.Once - file_google_type_month_proto_rawDescData = file_google_type_month_proto_rawDesc -) - -func file_google_type_month_proto_rawDescGZIP() []byte { - file_google_type_month_proto_rawDescOnce.Do(func() { - 
file_google_type_month_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_month_proto_rawDescData) - }) - return file_google_type_month_proto_rawDescData -} - -var file_google_type_month_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_type_month_proto_goTypes = []interface{}{ - (Month)(0), // 0: google.type.Month -} -var file_google_type_month_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_type_month_proto_init() } -func file_google_type_month_proto_init() { - if File_google_type_month_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_type_month_proto_rawDesc, - NumEnums: 1, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_type_month_proto_goTypes, - DependencyIndexes: file_google_type_month_proto_depIdxs, - EnumInfos: file_google_type_month_proto_enumTypes, - }.Build() - File_google_type_month_proto = out.File - file_google_type_month_proto_rawDesc = nil - file_google_type_month_proto_goTypes = nil - file_google_type_month_proto_depIdxs = nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 98137a7737c..4cad314af46 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,5 @@ -# cloud.google.com/go v0.102.0 -## explicit; go 1.15 +# cloud.google.com/go v0.104.0 +## explicit; go 1.17 cloud.google.com/go cloud.google.com/go/internal cloud.google.com/go/internal/optional @@ -11,11 +11,12 @@ cloud.google.com/go/compute/metadata # cloud.google.com/go/iam v0.3.0 ## explicit; go 1.15 cloud.google.com/go/iam -# cloud.google.com/go/storage v1.22.1 -## explicit; go 1.15 +# cloud.google.com/go/storage v1.27.0 +## explicit; go 1.17 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 +cloud.google.com/go/storage/internal/apiv2/stubs # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore @@ -100,7 +101,7 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.44.72 +# github.com/aws/aws-sdk-go v1.44.109 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -393,15 +394,15 @@ github.com/golang/snappy # github.com/google/btree v1.0.1 ## explicit; go 1.12 github.com/google/btree -# github.com/google/go-cmp v0.5.8 +# github.com/google/go-cmp v0.5.9 ## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20220729232143-a41b82acbcb1 -## explicit; go 1.17 +# github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 +## explicit; go 1.18 github.com/google/pprof/profile # github.com/google/uuid v1.3.0 ## explicit @@ -410,21 +411,16 @@ github.com/google/uuid ## explicit; go 1.18 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# 
github.com/googleapis/gax-go/v2 v2.4.0 -## explicit; go 1.15 +# github.com/googleapis/gax-go/v2 v2.5.1 +## explicit; go 1.18 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/internal -# github.com/googleapis/gnostic v0.0.0-00010101000000-000000000000 => github.com/google/gnostic v0.6.9 -## explicit; go 1.12 -# github.com/googleapis/go-type-adapters v1.0.0 -## explicit; go 1.11 -github.com/googleapis/go-type-adapters/adapters # github.com/gorilla/mux v1.8.0 ## explicit; go 1.12 github.com/gorilla/mux -# github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 +# github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 ## explicit; go 1.17 github.com/grafana/regexp github.com/grafana/regexp/syntax @@ -438,12 +434,12 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/tags github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/tracing github.com/grpc-ecosystem/go-grpc-middleware/v2/util/metautils -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 ## explicit; go 1.17 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/hashicorp/consul/api v1.14.0 +# github.com/hashicorp/consul/api v1.15.2 ## explicit; go 1.12 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.1.0 @@ -679,8 +675,8 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220616150932-0d0e44d7e65f -## explicit; go 1.17 +# github.com/prometheus/prometheus v0.39.1 +## explicit; go 1.18 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/dns @@ -689,6 +685,7 @@ github.com/prometheus/prometheus/discovery/refresh github.com/prometheus/prometheus/discovery/targetgroup github.com/prometheus/prometheus/model/exemplar github.com/prometheus/prometheus/model/labels +github.com/prometheus/prometheus/model/metadata github.com/prometheus/prometheus/model/relabel github.com/prometheus/prometheus/model/rulefmt github.com/prometheus/prometheus/model/textparse @@ -768,7 +765,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/objstore v0.0.0-20220923084403-cec51c61948b +# github.com/thanos-io/objstore v0.0.0-20221006135717-79dcec7fe604 ## explicit; go 1.18 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp @@ -778,7 +775,7 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing -# github.com/thanos-io/thanos v0.27.0-rc.0.0.20220929152722-4d764443cde3 +# github.com/thanos-io/thanos v0.27.0-rc.0.0.20221007170555-a4e334152549 ## explicit; go 1.18 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader @@ -897,7 +894,7 @@ go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials go.etcd.io/etcd/client/v3/internal/endpoint go.etcd.io/etcd/client/v3/internal/resolver -# go.mongodb.org/mongo-driver v1.10.0 +# go.mongodb.org/mongo-driver v1.10.2 ## explicit; go 1.10 
go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -925,14 +922,23 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +# go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 +## explicit; go 1.17 +go.opentelemetry.io/contrib/propagators/autoprop # go.opentelemetry.io/contrib/propagators/aws v1.9.0 ## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/aws/xray -# go.opentelemetry.io/contrib/propagators/ot v1.4.0 -## explicit; go 1.16 +# go.opentelemetry.io/contrib/propagators/b3 v1.9.0 +## explicit; go 1.17 +go.opentelemetry.io/contrib/propagators/b3 +# go.opentelemetry.io/contrib/propagators/jaeger v1.9.0 +## explicit; go 1.17 +go.opentelemetry.io/contrib/propagators/jaeger +# go.opentelemetry.io/contrib/propagators/ot v1.9.0 +## explicit; go 1.17 go.opentelemetry.io/contrib/propagators/ot # go.opentelemetry.io/otel v1.10.0 ## explicit; go 1.17 @@ -952,18 +958,18 @@ go.opentelemetry.io/otel/semconv/v1.12.0 ## explicit; go 1.17 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration -# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.9.0 +# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 ## explicit; go 1.17 go.opentelemetry.io/otel/exporters/otlp/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.9.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 ## explicit; go 1.17 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.9.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 ## explicit; go 1.17 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc -# go.opentelemetry.io/otel/metric v0.31.0 +# go.opentelemetry.io/otel/metric v0.32.0 ## explicit; go 1.17 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/global @@ -974,27 +980,28 @@ go.opentelemetry.io/otel/metric/instrument/syncfloat64 go.opentelemetry.io/otel/metric/instrument/syncint64 go.opentelemetry.io/otel/metric/internal/global go.opentelemetry.io/otel/metric/unit -# go.opentelemetry.io/otel/sdk v1.9.0 +# go.opentelemetry.io/otel/sdk v1.10.0 ## explicit; go 1.17 go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace +go.opentelemetry.io/otel/sdk/trace/tracetest # go.opentelemetry.io/otel/trace v1.10.0 ## explicit; go 1.17 go.opentelemetry.io/otel/trace -# go.opentelemetry.io/proto/otlp v0.18.0 +# go.opentelemetry.io/proto/otlp v0.19.0 ## explicit; go 1.14 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# go.uber.org/atomic v1.9.0 -## explicit; go 1.13 +# go.uber.org/atomic v1.10.0 +## explicit; go 1.18 go.uber.org/atomic -# go.uber.org/goleak v1.1.12 -## explicit; go 1.13 +# go.uber.org/goleak v1.2.0 +## explicit; go 1.18 go.uber.org/goleak go.uber.org/goleak/internal/stack # 
go.uber.org/multierr v1.8.0 @@ -1020,7 +1027,7 @@ golang.org/x/crypto/pkcs12/internal/rc2 # golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced +# golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -1037,7 +1044,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 +# golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1047,10 +1054,10 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 +# golang.org/x/sync v0.0.0-20220907140024-f12130a52804 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 +# golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1064,7 +1071,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 +# golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 ## explicit golang.org/x/time/rate # golang.org/x/tools v0.1.12 @@ -1086,7 +1093,7 @@ golang.org/x/tools/internal/typesinternal ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.91.0 +# google.golang.org/api v0.97.0 ## explicit; go 1.15 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1119,7 +1126,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276 +# google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody @@ -1127,16 +1134,10 @@ google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/googleapis/storage/v2 -google.golang.org/genproto/googleapis/type/color google.golang.org/genproto/googleapis/type/date -google.golang.org/genproto/googleapis/type/datetime -google.golang.org/genproto/googleapis/type/decimal google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/googleapis/type/fraction -google.golang.org/genproto/googleapis/type/month google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.48.0 => google.golang.org/grpc v1.45.0 +# google.golang.org/grpc v1.49.0 => google.golang.org/grpc v1.45.0 ## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1253,8 +1254,6 @@ gopkg.in/yaml.v3 ## explicit; go 1.12 sigs.k8s.io/yaml # git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 -# k8s.io/client-go => k8s.io/client-go v0.21.3 -# k8s.io/api => k8s.io/api v0.21.3 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab # 
github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b
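
Editorial sketch, not part of this change: the vendor/modules.txt hunk above newly vendors go.opentelemetry.io/contrib/propagators/autoprop v0.34.0 alongside the b3, jaeger, and ot propagator packages. The snippet below is an assumed, minimal illustration of how that package is typically wired into an application; it does not appear anywhere in this diff, and only uses the exported autoprop and otel APIs.

package main

import (
	"go.opentelemetry.io/contrib/propagators/autoprop"
	"go.opentelemetry.io/otel"
)

func main() {
	// NewTextMapPropagator builds a composite propagator from the
	// OTEL_PROPAGATORS environment variable (default "tracecontext,baggage").
	// With the b3, jaeger, and ot packages vendored above, values such as
	// "b3,jaeger,ottrace" can be selected at runtime without code changes.
	otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())
}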